scf_celebic/ex7/code/task4/accu.template/main.cpp

// MPI code in C++.
// See [Gropp/Lusk/Skjellum, "Using MPI", p.33/41 etc.]
// and /opt/mpich/include/mpi2c++/comm.h for details
#include "geom.h"
#include "par_geom.h"
#include "vdop.h"
#include <cassert>
#include <cmath>
#include <iostream>
#include <mpi.h> // MPI
#include <omp.h> // OpenMP
using namespace std;
int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    MPI_Comm const icomm(MPI_COMM_WORLD);
    omp_set_num_threads(1);                // don't use OMP parallelization for a start
    //
    {
        int np;
        MPI_Comm_size(icomm, &np);
        // assert(4 == np);                // example is only provided for 4 MPI processes
    }
    // #####################################################################
    // ---- Read the f.e. mesh and the mapping of elements to MPI processes
    //Mesh const mesh_c("square_4.txt");   // Files square_4.txt and square_4_sd.txt are needed
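    // ParMesh presumably reads the per-rank subdomain files of the "square" mesh and
    // sets up the local-to-global node mapping used by the accumulation routines below
    // (assumption based on the class name; see par_geom.h for the actual behaviour).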
    ParMesh const mesh("square", icomm);
    int const numprocs = mesh.NumProcs();
    int const myrank   = mesh.MyRank();
    if ( 0 == myrank ) {
        cout << "\n There are " << numprocs << " processes running.\n \n";
    }
    int const check_rank = 1;              // choose the MPI process whose mesh you would like to check
    //if ( check_rank == myrank ) mesh.Debug();
    //if ( check_rank == myrank ) mesh.DebugEdgeBased();
    // ##########################################################################
    // ---- allocate local vectors and check scalar product and vector accumulation
    if (check_rank == myrank) { printf("\n\n-------------- Task 9 --------------\n\n"); }
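    // Printing the coordinates below assumes an operator<< overload for the container
    // returned by GetCoords() (presumably provided via geom.h or vdop.h).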
    if (check_rank == myrank) cout << "Mesh coordinates: " << mesh.GetCoords() << endl << endl;
    MPI_Barrier(icomm);
    vector<double> xl(mesh.Nnodes(), 1.0);
    // for visualization I had to type in the terminal:
    //   export LIBGL_ALWAYS_SOFTWARE=1
    if (check_rank == myrank) mesh.Visualize(xl);
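    // Parallel scalar product: dscapr is expected to combine the local contributions
    // via a global reduction (e.g. MPI_Allreduce), so every rank should print the same
    // value for ss (assumption; see vdop.h / par_geom.h for the actual routine).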
    double ss = mesh.dscapr(xl, xl);
    cout << myrank << " : scalar : " << ss << endl;
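    // Vector accumulation: VecAccu is expected to sum the entries of nodes shared by
    // several subdomains, so starting from the all-ones vector the interface entries
    // should afterwards show each node's multiplicity (visible in the second plot).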
    mesh.VecAccu(xl);
    if (check_rank == myrank) mesh.Visualize(xl);
    MPI_Barrier(icomm);
    if (check_rank == myrank) { printf("\n\n-------------- Task 10 --------------\n\n"); }
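    // VecAccuInt is assumed to be the integer variant of VecAccu (Task 10); for the
    // all-ones vector y the accumulated entries again give the node multiplicities.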
    vector<int> y(mesh.Nnodes(), 1);
    mesh.VecAccuInt(y);
    if (check_rank == myrank) {
        printf("Accumulated integer vector y:\n");
        for (int i : y) {
            cout << i << " ";
        }
    }
    MPI_Barrier(icomm);
    if (check_rank == myrank) { printf("\n\n-------------- Task 11 --------------\n\n"); }
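    // GlobalNodes is expected to return the total number of distinct global nodes,
    // i.e. interface nodes counted only once across all subdomains (assumption; one
    // possible way is to sum 1/multiplicity over an accumulated all-ones vector).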
    int global_nodes = mesh.GlobalNodes();
    if (check_rank == myrank) cout << "Global nodes: " << global_nodes << endl;
    MPI_Barrier(icomm);
    if (check_rank == myrank) { printf("\n\n-------------- Task 12 --------------\n\n"); }
    // set xl back to the all-ones vector
    for (size_t k = 0; k < xl.size(); ++k)
    {
        xl[k] = 1.0;
    }
    if (check_rank == myrank) mesh.Visualize(xl);
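    // Average presumably converts the vector into its averaged representation, e.g. by
    // accumulating and dividing each entry by the number of subdomains sharing that
    // node; for the all-ones input both plots should therefore look identical.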
    mesh.Average(xl);
    if (check_rank == myrank) mesh.Visualize(xl);
    // -------------- Task 13 --------------
    // Should work with 2, 4 and 6 subdomains (change the run target in GCC_default.mk).
    // Check the subdomains with different values of check_rank (0-5).
    MPI_Finalize();
    return 0;
}