// MPI helper routines for block-distributed std::vector<double> data.
#include "VecFuncs.h"

#include <cassert>
#include <cfloat>
#include <iostream>
#include <limits>
#include <vector>
void DebugVector(const std::vector<double>& xin, MPI_Comm icomm)
|
|
{
|
|
int rank, size;
|
|
MPI_Comm_rank(icomm, &rank);
|
|
MPI_Comm_size(icomm, &size);
|
|
int ierr;
|
|
|
|
int active_rank = -1;
|
|
|
|
for (int step = 0; step < size; ++step)
|
|
{
|
|
if (rank == 0)
|
|
{
|
|
std::cout << "Enter rank to display vector: " << std::endl;
|
|
std::cin >> active_rank;
|
|
}
|
|
|
|
ierr = MPI_Bcast(&active_rank, 1, MPI_INT, 0, icomm);
|
|
assert(ierr == 0);
|
|
|
|
MPI_Barrier(icomm);
|
|
|
|
if (rank == active_rank)
|
|
{
|
|
std::cout << "Output from process " << rank << std::endl;
|
|
|
|
for (size_t i = 0; i < xin.size(); ++i)
|
|
{
|
|
std::cout << "xin[" << i << "] = " << xin[i] << std::endl;
|
|
}
|
|
|
|
std::cout << std::endl;
|
|
}
|
|
|
|
MPI_Barrier(icomm);
|
|
}
|
|
}
|
|
|
|
double par_scalar(const std::vector<double> &x, const std::vector<double> &y, const MPI_Comm &comm) {
|
|
assert(x.size()==y.size());
|
|
|
|
double local_sum = 0.0;
|
|
for (int k = 0; k < x.size(); ++k) {
|
|
local_sum += x[k] * y[k];
|
|
}
|
|
|
|
double global_sum = 0.0;
|
|
int mpi_error = MPI_Allreduce(&local_sum, &global_sum, 1, MPI_DOUBLE, MPI_SUM, comm);
|
|
assert(mpi_error == 0);
|
|
|
|
return global_sum;
|
|
}
|
|
|
|
void par_minmax(std::vector<double> &x, double &min_value, double &max_value, const MPI_Comm &icomm)
|
|
{
|
|
int myrank;
|
|
MPI_Comm_rank(icomm, &myrank);
|
|
|
|
int local_n = x.size();
|
|
int global_offset = myrank * local_n;
|
|
|
|
struct {double value; int idx;} local_min, local_max, global_min, global_max; // global index
|
|
|
|
local_min.value = local_max.value = x[0];
|
|
local_min.idx = local_max.idx = global_offset;
|
|
|
|
// finding local min/max with the corresponding global index
|
|
for (int i = 1; i < local_n; ++i) {
|
|
int global_idx = global_offset + i;
|
|
if (x[i] < local_min.value){
|
|
local_min.value = x[i];
|
|
local_min.idx = global_idx;
|
|
}
|
|
if (x[i] > local_max.value){
|
|
local_max.value = x[i];
|
|
local_max.idx = global_idx;
|
|
}
|
|
}
|
|
|
|
// reduction to the global one including the global index (need it later for interchanging)
|
|
MPI_Allreduce(&local_min, &global_min, 1, MPI_DOUBLE_INT, MPI_MINLOC, icomm);
|
|
MPI_Allreduce(&local_max, &global_max, 1, MPI_DOUBLE_INT, MPI_MAXLOC, icomm);
|
|
min_value = global_min.value;
|
|
max_value = global_max.value;
|
|
|
|
// calculating the process and the local index for interchanging the min and max value
|
|
int rank_min = global_min.idx / local_n;
|
|
int rank_max = global_max.idx / local_n;
|
|
int local_min_idx = global_min.idx % local_n;
|
|
int local_max_idx = global_max.idx % local_n;
|
|
|
|
// interchanging
|
|
if (rank_min == rank_max){
|
|
std::swap(x[local_min_idx], x[local_max_idx]);
|
|
}
|
|
else {
|
|
double recv_value;
|
|
if (myrank == rank_min){
|
|
MPI_Sendrecv(&x[local_min_idx], 1, MPI_DOUBLE, rank_max, 0, &recv_value, 1, MPI_DOUBLE, rank_max, 0, icomm, MPI_STATUS_IGNORE);
|
|
x[local_min_idx] = recv_value;
|
|
}
|
|
if (myrank == rank_max){
|
|
MPI_Sendrecv(&x[local_max_idx], 1, MPI_DOUBLE, rank_min, 0, &recv_value, 1, MPI_DOUBLE, rank_min, 0, icomm, MPI_STATUS_IGNORE);
|
|
x[local_max_idx] = recv_value;
|
|
}
|
|
|
|
}
|
|
|
|
|
|
return;
|
|
}
|