Lisa Pizzo 2026-01-10 15:28:21 +01:00
commit 8be3dadaa3
5 changed files with 408 additions and 0 deletions

@@ -0,0 +1,47 @@
#ifndef USERSET_FILE
#define USERSET_FILE
#include <cmath>
/**
* User function: f(@p x,@p y)
* @param[in] x x-coordinate of discretization point
* @param[in] y y-coordinate of discretization point
* @return value for right hand side f(@p x,@p y)
*/
double FunctF(double const x, double const y);
/**
* User function: u(@p x,@p y)
* @param[in] x x-coordinate of discretization point
* @param[in] y y-coordinate of discretization point
* @return value for solution vector u(@p x,@p y)
*/
double FunctU(double const x, double const y);
/**
* User function: f(@p x,@p y) = @f$ -\Delta u@f$ for the solution @f$ u(x,y) = x^2 \sin(2.5\pi y)@f$.
* @param[in] x x-coordinate of discretization point
* @param[in] y y-coordinate of discretization point
* @return value f(@p x,@p y)
*/
inline
double fNice(double const x, double const y)
{
//return x * x * std::sin(2.5 * M_PI * y); // solution u
return std::sin(M_PI*2.5*y)*(M_PI*M_PI*2.5*2.5*x*x - 2); // -Laplacian(u)
}
/**
* User function: f(@p x,@p y) = 0.
* @param[in] x x-coordinate of discretization point
* @param[in] y y-coordinate of discretization point
* @return value 0
*/
inline
double f_zero(double const x, double const y)
//double f_zero(double const /*x*/, double const /*y*/)
{
return 0.0 + 0.0*(x+y); // the 0.0*(x+y) term only silences unused-parameter warnings
}
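// Added self-check sketch (assumption: not part of the original header).
// It verifies numerically that fNice() equals -Laplacian(u) for
// u(x,y) = x^2 sin(2.5*pi*y) by comparing against the 5-point central
// finite-difference Laplacian; the returned discrepancy is O(h^2).
inline
double checkNice(double const x, double const y, double const h = 1.0e-3)
{
    auto u = [](double a, double b) { return a * a * std::sin(2.5 * M_PI * b); };
    double const lap = ( u(x + h, y) + u(x - h, y)
                       + u(x, y + h) + u(x, y - h) - 4.0 * u(x, y) ) / (h * h);
    return std::abs(fNice(x, y) + lap); // fNice == -Laplacian(u), so this is ~0
}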
#endif

@@ -0,0 +1,122 @@
#include "vdop.h"
#include <cassert> // assert()
#include <cmath>
#include <iostream>
#include <vector>
using namespace std;
void vddiv(vector<double> & x, vector<double> const& y,
vector<double> const& z)
{
assert( x.size()==y.size() && y.size()==z.size() );
size_t n = x.size();
#pragma omp parallel for
for (size_t k = 0; k < n; ++k)
{
x[k] = y[k] / z[k];
}
return;
}
//******************************************************************************
void vdaxpy(std::vector<double> & x, std::vector<double> const& y,
double alpha, std::vector<double> const& z )
{
assert( x.size()==y.size() && y.size()==z.size() );
size_t n = x.size();
#pragma omp parallel for
for (size_t k = 0; k < n; ++k)
{
x[k] = y[k] + alpha * z[k];
}
return;
}
//******************************************************************************
double dscapr(std::vector<double> const& x, std::vector<double> const& y)
{
assert( x.size()==y.size());
size_t n = x.size();
double s = 0.0;
#pragma omp parallel for reduction(+:s)
for (size_t k = 0; k < n; ++k)
{
s += x[k] * y[k];
}
return s;
}
//******************************************************************************
bool CompareVectors(std::vector<double> const& x, int const n, double const y[], double const eps)
{
bool bn = (static_cast<int>(x.size())==n);
if (!bn)
{
cout << "######### Error: " << "number of elements" << endl;
}
//bool bv = equal(x.cbegin(),x.cend(),y);
bool bv = equal(x.cbegin(),x.cend(),y,
[eps](double a, double b) -> bool
{ return std::abs(a-b)<eps*(1.0+0.5*(std::abs(a)+ std::abs(b))); }
);
if (!bv)
{
assert(static_cast<int>(x.size())==n);
cout << "######### Error: " << "values" << endl;
}
return bn && bv;
}
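//******************************************************************************
// Usage sketch for CompareVectors (assumption: illustrative, not part of the
// original file):
//   std::vector<double> const x{1.0, 2.0, 3.0};
//   double const y[] = {1.0, 2.0, 3.0 + 1.0e-13};
//   bool const ok = CompareVectors(x, 3, y, 1.0e-12);  // true: difference is
//                                                      // within relative bound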
//******************************************************************************
double par_scalar(vector<double> const &x, vector<double> const &y, MPI_Comm const& icomm)
{
const double s = dscapr(x,y);
double sg;
MPI_Allreduce(&s,&sg,1,MPI_DOUBLE,MPI_SUM,icomm);
return(sg);
}
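//******************************************************************************
// Usage sketch for par_scalar (assumption: illustrative, not part of the
// original file). Each process passes its local share of two distributed
// vectors and all processes receive the same global value:
//   std::vector<double> const x_loc(nloc, 1.0), y_loc(nloc, 2.0);
//   double const s = par_scalar(x_loc, y_loc, icomm);  // = 2*nloc*numprocs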
//******************************************************************************
void ExchangeAll(vector<double> const &xin, vector<double> &yout, MPI_Comm const &icomm)
{
int myrank, numprocs,ierr(-1);
MPI_Comm_rank(icomm, &myrank); // my MPI-rank
MPI_Comm_size(icomm, &numprocs);
int const N=xin.size();
int const sendcount = N/numprocs; // equally sized chunks
assert(sendcount*numprocs==N); // does N really split evenly into chunks?
assert(xin.size()==yout.size());
auto sendbuf = xin.data();
auto recvbuf = yout.data();
ierr = MPI_Alltoall(sendbuf, sendcount, MPI_DOUBLE,
recvbuf, sendcount, MPI_DOUBLE, icomm);
assert(0==ierr);
return;
}
//******************************************************************************
void ExchangeAllInPlace(vector<double> &xin, MPI_Comm const &icomm)
{
int myrank, numprocs,ierr(-1);
MPI_Comm_rank(icomm, &myrank); // my MPI-rank
MPI_Comm_size(icomm, &numprocs);
int const N=xin.size();
int const sendcount = N/numprocs; // equally sized chunks
assert(sendcount*numprocs==N); // does N really split evenly into chunks?
auto sendbuf = xin.data();
ierr = MPI_Alltoall(MPI_IN_PLACE, sendcount, MPI_DOUBLE,
sendbuf, sendcount, MPI_DOUBLE, icomm);
assert(0==ierr);
return;
}
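//******************************************************************************
// Data layout sketch for the MPI_Alltoall exchanges above, here for
// numprocs = 2 and N = 4 (illustrative, not part of the original file):
//   before: rank 0 holds [a0 a1 | a2 a3],  rank 1 holds [b0 b1 | b2 b3]
//   after : rank 0 holds [a0 a1 | b0 b1],  rank 1 holds [a2 a3 | b2 b3]
// Block j of rank i is sent to rank j, where it is stored as block i.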

@@ -0,0 +1,167 @@
#ifndef VDOP_FILE
#define VDOP_FILE
#include <iostream>
#include <mpi.h> // MPI
#include <string>
#include <vector>
/** @brief Element-wise vector division x_k = y_k/z_k.
*
* @param[out] x target vector
* @param[in] y source vector
* @param[in] z source vector
*
*/
void vddiv(std::vector<double> &x, std::vector<double> const &y,
std::vector<double> const &z);
/** @brief Element-wise daxpy operation x(k) = y(k) + alpha*z(k).
*
* @param[out] x target vector
* @param[in] y source vector
* @param[in] alpha scalar
* @param[in] z source vector
*
*/
void vdaxpy(std::vector<double> &x, std::vector<double> const &y,
double alpha, std::vector<double> const &z );
/** @brief Calculates the Euclidean inner product of two vectors.
*
* @param[in] x vector
* @param[in] y vector
* @return Euclidean inner product @f$\langle x,y \rangle@f$
*
*/
double dscapr(std::vector<double> const &x, std::vector<double> const &y);
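/** @brief Calculates the Euclidean inner product scaled by the vector length,
*         @f$\langle x,y \rangle / n@f$.
*
* @param[in] x vector
* @param[in] y vector
* @return scaled Euclidean inner product
*
*/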
inline
double L2_scapr(std::vector<double> const &x, std::vector<double> const &y)
{
return dscapr(x, y) / x.size();
}
/** Parallel inner product
@param[in] x vector
@param[in] y vector
@param[in] icomm MPI communicator
@return resulting Euclidean inner product @f$\langle x,y \rangle@f$
*/
double par_scalar(std::vector<double> const &x, std::vector<double> const &y,
MPI_Comm const& icomm=MPI_COMM_WORLD);
/* ReadIn : Input and broadcast of an integer */
inline
int ReadIn(std::string const &ss = std::string(), MPI_Comm const &icomm = MPI_COMM_WORLD)
{
MPI_Barrier(icomm);
int myrank; /* my rank number */
MPI_Comm_rank(icomm, &myrank);
int id;
if (myrank == 0) {
std::cout << "\n\n " << ss << " : Which process do you want to debug?\n";
std::cin >> id;
}
MPI_Bcast(&id, 1, MPI_INT, 0, icomm);
return id;
}
/**
* Print entries of a vector to standard output.
*
* @param[in] v vector values
* @param[in] ss string containing the vector name
* @param[in] icomm communicator group for MPI
*
*/
template <class T>
void DebugVector(std::vector<T> const &v, std::string const &ss = std::string(), MPI_Comm const &icomm = MPI_COMM_WORLD)
{
MPI_Barrier(icomm);
std::cout.flush();
int numprocs; /* # processes */
MPI_Comm_size(icomm, &numprocs);
int myrank; /* my rank number */
MPI_Comm_rank(icomm, &myrank);
int readid = ReadIn(ss, icomm); /* Read readid */
while ( (0 <= readid) && (readid < numprocs) ) {
if (myrank == readid) {
std::cout << "\n\n process " << readid;
std::cout << "\n .... " << ss << " (nnode = " << v.size() << ")\n";
for (size_t j = 0; j < v.size(); ++j) {
std::cout.setf(std::ios::right, std::ios::adjustfield);
std::cout << "[" << j << "] "<< v[j] << " ";
}
std::cout << std::endl;
std::cout.flush();
}
readid = ReadIn(ss, icomm); /* Read readid */
}
MPI_Barrier(icomm);
return;
}
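/* Usage sketch (assumption: illustrative, not part of the original header):
     std::vector<double> u(nnode, 0.0);
     DebugVector(u, "u", icomm);  // interactively inspect u on selected ranks
*/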
/** @brief Compares an STL vector with POD vector.
*
* The accuracy criterion @f$ |x_k-y_k| < \varepsilon \left({1+0.5(|x_k|+|y_k|)}\right) @f$
* follows the book by
* <a href="https://www.springer.com/la/book/9783319446592">Stoyan/Baran</a>, p. 8.
*
* @param[in] x STL vector
* @param[in] n length of POD vector
* @param[in] y POD vector
* @param[in] eps relative accuracy criterion (default: 0.0).
* @return true iff pairwise vector elements are relatively close to each other.
*
*/
bool CompareVectors(std::vector<double> const &x, int n, double const y[], double const eps = 0.0);
/** Output operator for vector
* @param[in,out] s output stream, e.g. @p cout
* @param[in] v vector
*
* @return output stream
*/
template <class T>
std::ostream& operator<<(std::ostream &s, std::vector<T> const &v)
{
for (auto vp: v)
{
s << vp << " ";
}
return s;
}
/** Exchanges equally sized partitions of vector @p xin among all MPI processes.
* The received data are returned in vector @p yout .
*
* @param[in] xin input vector
* @param[out] yout output vector
* @param[in] icomm MPI communicator
*
*/
void ExchangeAll(std::vector<double> const &xin, std::vector<double> &yout, MPI_Comm const &icomm = MPI_COMM_WORLD);
/** Exchanges equally sized partitions of vector @p xin among all MPI processes.
* The received data are returned in place in vector @p xin .
*
* @param[in,out] xin input/output vector
* @param[in] icomm MPI communicator
*
*/
void ExchangeAllInPlace(std::vector<double> &xin, MPI_Comm const &icomm = MPI_COMM_WORLD);
#endif

@@ -0,0 +1,52 @@
%% Visualize results
%
% flatpak run org.octave.Octave <filename>
% or
% octave --no-window-system --no-gui -qf <filename>
%
% or
%
% matlab -nosplash -nodesktop -r 'try visualize_par_results(4); catch; end; quit'
%
function visualize_par_results(nprocs)
%%
if nargin<1
nprocs = 4;
end
fprintf('# procs = %d\n',nprocs)
pre = 'uv_';
post = '.txt';
xc = []; nnodes = [];
ia = []; nelems = [];
v = [];
node_offset = 0;
elem_offset = 0;
for rank=0:nprocs-1
fname = [pre,num2str(rank,'%2u'),post];
[lxc,lia,lv] = ascii_read_meshvector(fname);
% whos lxc lia lv
nnodes = [nnodes size(lxc,1)];
nelems = [nelems size(lia,1)];
%[xc,ia,v]
xc = [xc; lxc];
v = [v ; lv ];
ia = [ia; lia+node_offset];
% node_offset
% lia = lia + node_offset
% ia = [ia; lia];
% index offsets for next subdomain
node_offset = node_offset + nnodes(end);
elem_offset = elem_offset + nelems(end); % (elem_offset is not used below)
end
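% Offset sketch (illustrative note, not part of the original script): with
% nnodes = [n0 n1 ...], the connectivity lia of subdomain r is shifted by
% n0+...+n(r-1) so that it indexes into the concatenated coordinate list xc.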
% fname = 'uv.txt';
% [xc,ia,v] = ascii_read_meshvector(fname);
h = trisurf(ia, xc(:,1), xc(:,2), v);
xlabel('x'),ylabel('y'),zlabel('z')
shading interp
waitfor(h) % wait for closing the figure

@@ -0,0 +1,20 @@
%% Visualize results
%
% flatpak run org.octave.Octave <filename>
% or
% octave --no-window-system --no-gui -qf <filename>
%
% or
% matlab -nosplash < <filename>
clear all
clc
%%
fname = 'uv.txt';
[xc,ia,v] = ascii_read_meshvector(fname);
h = trisurf(ia, xc(:,1), xc(:,2), v);
waitfor(h) % wait for closing the figure