Lisa Pizzo 2026-01-10 15:27:25 +01:00
commit 2af2dd4006
5 changed files with 595 additions and 0 deletions


@@ -0,0 +1,56 @@
#
# use GNU-Compiler tools
COMPILER=GCC_
# COMPILER=GCC_SEQ_
# alternatively from the shell
# export COMPILER=GCC_
# or, alternatively from the shell
# make COMPILER=GCC_
MAIN = main
SOURCES = ${MAIN}.cpp vdop.cpp geom.cpp par_geom.cpp\
getmatrix.cpp jacsolve.cpp userset.cpp
# cuthill_mckee_ordering.cpp
OBJECTS = $(SOURCES:.cpp=.o)
PROGRAM = ${MAIN}.${COMPILER}
# uncomment the next two lines for debugging and detailed performance analysis
CXXFLAGS += -g
# -DNDEBUG
# -pg slows down the code on my laptop when using CLANG_
LINKFLAGS += -g
#-pg
#CXXFLAGS += -Q --help=optimizers
#CXXFLAGS += -fopt-info
include ../${COMPILER}default.mk
#############################################################################
# additional specific cleaning in this directory
clean_all::
	@rm -f t.dat*
#############################################################################
# special testing
# NPROCS = 4
#
TFILE = t.dat
# TTMP = t.tmp
#
graph: $(PROGRAM)
# @rm -f $(TFILE).*
# the next two lines are for the sequential run only
	./$(PROGRAM)
	@mv $(TFILE).000 $(TFILE)
# $(MPIRUN) $(MPIFLAGS) -np $(NPROCS) $(PROGRAM)
# @echo " "; echo "Manipulate data for graphics."; echo " "
# @cat $(TFILE).* > $(TTMP)
# @sort -b -k 2 $(TTMP) -o $(TTMP).1
# @sort -b -k 1 $(TTMP).1 -o $(TTMP).2
# @awk -f nl.awk $(TTMP).2 > $(TFILE)
# @rm -f $(TTMP).* $(TTMP) $(TFILE).*
#
	-gnuplot jac.dem


@@ -0,0 +1,281 @@
#include "vdop.h"
#include "geom.h"
#include "getmatrix.h"
#include "jacsolve.h"
#include "userset.h"
#include <cassert>
#include <cmath>
#include <iostream>
#include <vector>
using namespace std;
// #####################################################################
// ParMesh const & mesh,
void JacobiSolve(CRS_Matrix const &SK, vector<double> const &f, vector<double> &u)
{
const double omega = 1.0;
const int maxiter = 300; //LISA: lowered the maxiter
const double tol = 1e-4, // tolerance //LISA: lowered the tolerance
tol2 = tol * tol; // tolerance^2
int nrows = SK.Nrows(); // number of rows == number of columns
assert( nrows == static_cast<int>(f.size()) && f.size() == u.size() );
cout << endl << " Start Jacobi solver for " << nrows << " d.o.f.s" << endl;
// Choose initial guess
for (int k = 0; k < nrows; ++k) {
u[k] = 0.0; // u := 0
}
vector<double> dd(nrows); // matrix diagonal
vector<double> r(nrows); // residual
vector<double> w(nrows); // correction
SK.GetDiag(dd); // dd := diag(K)
////DebugVector(dd);{int ijk; cin >> ijk;}
// Initial sweep
SK.Defect(r, f, u); // r := f - K*u
vddiv(w, r, dd); // w := D^{-1}*r
const double sigma0 = dscapr(w, r); // s0 := <w,r>
// Iteration sweeps
int iter = 0;
double sigma = sigma0;
while ( sigma > tol2 * sigma0 && maxiter > iter) // relative error
//while ( sigma > tol2 && maxiter > iter) // absolute error
{
++iter;
vdaxpy(u, u, omega, w ); // u := u + om*w
SK.Defect(r, f, u); // r := f - K*u
vddiv(w, r, dd); // w := D^{-1}*r
sigma = dscapr(w, r); // sigma := <w,r>
// cout << "Iteration " << iter << " : " << sqrt(sigma/sigma0) << endl;
}
cout << "aver. Jacobi rate : " << exp(log(sqrt(sigma / sigma0)) / iter) << " (" << iter << " iter)" << endl;
cout << "final error: " << sqrt(sigma / sigma0) << " (rel) " << sqrt(sigma) << " (abs)\n";
return;
}
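// For reference, the vdop.h kernels used above have the following semantics
// (a minimal sketch inferred from the calls and comments in this file; the
// actual signatures and implementations live in vdop.h/vdop.cpp and may be
// OpenMP-parallelized):
//
//   void vddiv(vector<double> &x, vector<double> const &y,
//              vector<double> const &z);                 // x[k] := y[k] / z[k]
//   void vdaxpy(vector<double> &x, vector<double> const &y,
//               double alpha, vector<double> const &z);  // x[k] := y[k] + alpha*z[k]
//   double dscapr(vector<double> const &x, vector<double> const &y);  // returns <x,y>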
void JacobiSmoother(Matrix const &SK, std::vector<double> const &f, std::vector<double> &u,
std::vector<double> &r, int nsmooth, double const omega, bool zero)
{
// TODO: ensure compatible dimensions
int const nnodes = static_cast<int>(u.size());
if (zero) { // assumes initial solution is zero
DiagPrecond(SK, f, u, omega);
--nsmooth; // first smoothing sweep done
}
auto const &D = SK.GetDiag(); // accumulated diagonal of matrix @p SK.
for (int ns = 1; ns <= nsmooth; ++ns) {
SK.Defect(r, f, u); // r := f - K*u
#pragma omp parallel for
for (int k = 0; k < nnodes; ++k) {
// u := u + om*D^{-1}*r
u[k] = u[k] + omega * r[k] / D[k]; // MPI: distributed to accumulated vector needed
}
}
return;
}
void DiagPrecond(Matrix const &SK, std::vector<double> const &r, std::vector<double> &w,
double const omega)
{
// TODO: ensure compatible dimensions
auto const &D = SK.GetDiag(); // accumulated diagonal of matrix @p SK.
int const nnodes = static_cast<int>(w.size());
#pragma omp parallel for
for (int k = 0; k < nnodes; ++k) {
w[k] = omega * r[k] / D[k]; // MPI: distributed to accumulated vector needed
}
return;
}
Multigrid::Multigrid(Mesh const &cmesh, int const nlevel)
: _meshes(cmesh, nlevel),
_SK(), _u(_meshes.size()), _f(_meshes.size()), _d(_meshes.size()), _w(_meshes.size()),
_Pc2f()
{
cout << "\n........................ in Multigrid::Multigrid ..................\n";
// Allocate Memory for matrices/vectors on all levels
for (size_t lev = 0; lev < Nlevels(); ++lev) {
_SK.push_back( FEM_Matrix(_meshes[lev]) ); // CRS matrix
const auto nn = _SK[lev].Nrows();
_u[lev].resize(nn);
_f[lev].resize(nn);
_d[lev].resize(nn);
_w[lev].resize(nn);
auto vv = _meshes[lev].GetFathersOfVertices();
cout << vv.size() << endl;
}
// Intergrid transfer operators
//cout << "\n........................ in Multigrid::Multigrid Prolongation ..................\n";
//_Pc2f.push_back( BisectInterpolation(vector<int>(0)) ); // no prolongation to coarsest grid
_Pc2f.push_back( BisectIntDirichlet() ); // no prolongation to coarsest grid
for (size_t lev = 1; lev < Nlevels(); ++lev) {
//cout << lev << endl;
//cout << _meshes[lev].GetFathersOfVertices () << endl;
_Pc2f.push_back( BisectIntDirichlet( _meshes[lev].GetFathersOfVertices (), _meshes[lev-1].Index_DirichletNodes () ) );
//cout << _Pc2f.back().Nrows() << " " << _Pc2f.back().Ncols() << endl;
}
cout << "\n..........................................\n";
}
Multigrid::~Multigrid()
{}
void Multigrid::DefineOperators()
{
for (size_t lev = 0; lev < Nlevels(); ++lev) {
DefineOperator(lev);
}
return;
}
// GH: Hack
void Multigrid::DefineOperator(size_t lev)
{
_SK[lev].CalculateLaplace(_f[lev]); // fNice() in userset.h
if (lev == Nlevels() - 1) { // fine mesh
_meshes[lev].SetValues(_u[lev], [](double x, double y) -> double
{ return x * x * std::sin(2.5 * M_PI * y); }
);
}
else {
_meshes[lev].SetValues(_u[lev], f_zero);
}
_SK[lev].ApplyDirichletBC(_u[lev], _f[lev]);
return;
}
void Multigrid::JacobiSolve(size_t lev)
{
assert(lev < Nlevels());
::JacobiSolve(_SK[lev], _f[lev], _u[lev]);
}
void Multigrid::MG_Step(size_t lev, int const pre_smooth, bool const bzero, int nu)
{
assert(lev < Nlevels());
int const post_smooth = pre_smooth;
if (lev == 0) { // coarse level
JacobiSmoother(_SK[lev], _f[lev], _u[lev], _d[lev], 100, 1.0, false);
}
else {
JacobiSmoother(_SK[lev], _f[lev], _u[lev], _d[lev], pre_smooth, 0.85, bzero);
if (nu > 0) {
_SK[lev].Defect(_d[lev], _f[lev], _u[lev]); // d := f - K*u
_Pc2f[lev].MultT(_d[lev], _f[lev - 1]); // f_H := R*d
//DefectRestrict(_SK[lev], _Pc2f[lev], _f[lev - 1], _f[lev], _u[lev]); // f_H := R*(f - K*u)
//_meshes[lev-1].Visualize(_f[lev - 1]); // GH: Visualize: f_H should be 0 on Dirichlet B.C.
MG_Step(lev - 1, pre_smooth, true, nu); // solve K_H * u_H =f_H with u_H:=0
for (int k = 1; k < nu; ++k) {
// W-cycle
MG_Step(lev - 1, pre_smooth, false, nu); // solve K_H * u_H =f_H
}
_Pc2f[lev].Mult(_w[lev], _u[lev - 1]); // w := P*u_H
vdaxpy(_u[lev], _u[lev], 1.0, _w[lev] ); // u := u + tau*w
}
JacobiSmoother(_SK[lev], _f[lev], _u[lev], _d[lev], post_smooth, 0.85, false);
}
return;
}
void Multigrid::MG_Solve(int pre_smooth, double eps, int nu)
{
size_t lev=Nlevels()-1; // fine level
// start with zero guess
DiagPrecond(_SK[lev], _f[lev], _w[lev], 1.0); // w := D^{-1}*f
//double s0 = L2_scapr(_f[lev],_w[lev]); // s_0 := <f,w>
double s0 = dscapr(_f[lev],_w[lev]); // s_0 := <f,w>
double si;
bool bzero = true; // start with zero guess
int iter = 0;
do
{
MG_Step(lev, pre_smooth, bzero, nu);
bzero=false;
_SK[lev].Defect(_d[lev], _f[lev], _u[lev]); // d := f - K*u
DiagPrecond(_SK[lev], _d[lev], _w[lev], 1.0); // w := D^{-1}*d
//si = L2_scapr(_d[lev],_w[lev]); // s_i := <d,w>
si = dscapr(_d[lev],_w[lev]); // s_i := <d,w>
++iter;
} while (si>s0*eps*eps);
cout << "\nrel. error: " << sqrt(si/s0) << " ( " << iter << " iter.)" << endl;
return;
}
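// Usage sketch (not part of this commit): how the Multigrid class above is
// typically driven.  The coarse-mesh construction from a basename is an
// assumption borrowed from main.cpp; the Multigrid calls themselves follow
// the interface in jacsolve.h.
//
//   Mesh const cmesh("square");    // assumed: coarse mesh read from file
//   Multigrid mg(cmesh, 4);        // hierarchy with 4 meshes (coarse + 3 refinements)
//   mg.DefineOperators();          // assemble the FEM matrices on all levels
//   mg.MG_Solve(2, 1e-6, 1);       // V-cycle (nu=1), 2 pre-/post-smoothing sweeps
//   auto const &u = mg.GetSolution(mg.Nlevels() - 1);   // solution on the finest level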
void JacobiSolveMPI(ParMesh const &mesh, CRS_Matrix const &SK, vector<double> const &f, vector<double> &u)
{
const double omega = 1.0;
const int maxiter = 300;
const double tol = 1e-4,
tol2 = tol * tol;
int nrows = SK.Nrows();
assert(nrows == static_cast<int>(f.size()) && f.size() == u.size());
vector<double> dd(nrows);
vector<double> r(nrows);
vector<double> w(nrows);
SK.GetDiag(dd);
SK.Defect(r, f, u);
vddiv(w, r, dd);
double sigma0 = dscapr(w, r);
double sigma = sigma0;
int iter = 0;
while (sigma > tol2 * sigma0 && iter < maxiter)
{
++iter;
vddiv(w, r, dd); // w = D^-1 * r
mesh.VecAccu(w); // make w consistent across processes
vdaxpy(u, u, omega, w); // u := u + omega * w
SK.Defect(r, f, u);
sigma = dscapr(w, r);
}
if (mesh.MyRank() == 0)
{
cout << "aver. Jacobi rate : "
<< exp(log(sqrt(sigma / sigma0)) / iter)
<< " (" << iter << " iter)" << endl;
cout << "final error: "
<< sqrt(sigma / sigma0) << " (rel) "
<< sqrt(sigma) << " (abs)\n";
}
}
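// Note (sketch, not part of this commit): if dscapr() returns only the
// process-local scalar product, then sigma and sigma0 above are local
// quantities.  A globally consistent inner product would combine the local
// result with an MPI_Allreduce, e.g. via a hypothetical helper such as
//
//   double dscapr_global(vector<double> const &x, vector<double> const &y,
//                        MPI_Comm const &comm)
//   {
//       double s_local  = dscapr(x, y);   // process-local <x,y>
//       double s_global = 0.0;
//       MPI_Allreduce(&s_local, &s_global, 1, MPI_DOUBLE, MPI_SUM, comm);
//       return s_global;
//   }
//
// Whether this is needed here depends on how dscapr() is implemented in vdop.cpp.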


@@ -0,0 +1,157 @@
#ifndef JACSOLVE_FILE
#define JACSOLVE_FILE
#include "geom.h"
#include "getmatrix.h"
#include <vector>
#include "par_geom.h"
/**
* Solves the linear system of equations K @p u = @p f via the Jacobi iteration.
* We use a distributed symmetric CSR matrix @p SK, and the initial guess of the
* solution is set to 0.
* @param[in] SK CSR matrix
* @param[in] f distributed local vector storing the right hand side
* @param[out] u accumulated local vector storing the solution.
*/
void JacobiSolve(CRS_Matrix const &SK, std::vector<double> const &f, std::vector<double> &u);
/**
* Performs @p nsmooth Jacobi smoothing sweeps for the linear system of equations
* K @p u = @p f with a distributed symmetric CSR matrix @p SK.
* If @p zero is set, the initial guess @p u is assumed to be zero.
*
* In each smoothing step: @f$ \widehat{u} := u + \omega D^{-1}\left({f-K*u}\right) @f$
*
* @param[in] SK CSR matrix
* @param[in] f distributed local vector storing the right hand side
* @param[out] u accumulated local vector storing the solution.
* @param[in,out] r auxiliary local vector.
* @param[in] nsmooth number of smoothing steps.
* @param[in] omega relaxation parameter.
* @param[in] zero initial solution @p u is assumed to be zero.
*/
void JacobiSmoother(Matrix const &SK, std::vector<double> const &f, std::vector<double> &u,
std::vector<double> & r, int nsmooth=1, double const omega=1.0, bool zero=false);
/**
* @brief Simple diagonal preconditioning.
*
* The residual @p r scaled by the inverse diagonal of matrix @p SK yields the correction @p w.
*
* @f$ w := \omega D^{-1}*r @f$
*
* @param[in] SK matrix
* @param[in] r distributed local vector storing the residual
* @param[out] w accumulated local vector storing the correction.
* @param[in] omega relaxation parameter.
*/
void DiagPrecond(Matrix const &SK, std::vector<double> const &r, std::vector<double> &w,
double const omega=1.0);
/**
* @brief Stores the multigrid hierarchy: meshes, vectors, matrices, and prolongation operators.
*/
class Multigrid
{
public:
/**
* Generates the mesh hierarchy with @p nlevel meshes starting from the coarse mesh @p cmesh .
*
* The refined meshes are generated by edge bisection.
* All memory is allocated, but the stiffness matrices are not yet calculated.
*
* @param[in] cmesh initial coarse mesh
* @param[in] nlevel number of meshes in hierarchy, including the initial coarse mesh
*
*/
Multigrid(Mesh const& cmesh, int nlevel);
Multigrid(Multigrid const&) = delete;
Multigrid& operator=(Multigrid const&) = delete;
~Multigrid();
/**
* @return Number of meshes in hierarchy.
*/
size_t Nlevels() const
{return _meshes.size(); }
/**
* @return Number of unknowns.
*/
int Ndofs() const
{return _meshes[Nlevels()-1].Nnodes(); }
/**
* @return Mesh number @p lev .
*/
Mesh const& GetMesh(int lev) const
{ return _meshes[lev]; }
/**
* @return Solution vector at level @p lev .
*/
std::vector<double> const& GetSolution(int lev) const
{ return _u.at(lev); }
/**
* Calculates PDE matrices for all levels.
*/
void DefineOperators();
/**
* Calculates PDE matrix for level @p lev.
*
* @param[in] lev level in hierarchy
*/
void DefineOperator(size_t lev);
/**
* Solves the system of equations at level @p lev via Jacobi iterations
*
* @param[in] lev level in hierarchy
*/
void JacobiSolve(size_t lev);
/**
* Performs one multigrid step at level @p lev .
*
* @param[in] lev level in hierarchy
* @param[in] pre_smooth number of pre/post-smoothing sweeps
* @param[in] bzero start with zero-vector as solution
* @param[in] nu defines the multigrid cycle (1 = V-cycle, 2 = W-cycle)
*/
void MG_Step(size_t lev, int pre_smooth=1, bool const bzero=false, int nu=1);
/**
* Solves the system of equations at the finest level via multigrid.
*
* @param[in] pre_smooth number of pre/post-smoothing sweeps
* @param[in] eps stopping criterion (relative error)
* @param[in] nu defines the multigrid cycle (1 = V-cycle, 2 = W-cycle)
*/
void MG_Solve(int pre_smooth=1, double eps=1e-6, int nu=1);
private:
gMesh_Hierarchy _meshes; //!< mesh hierarchy from coarse (level 0) to fine.
std::vector<FEM_Matrix> _SK; //!< Sparse matrix on each level.
std::vector<std::vector<double>> _u; //!< Solution vector on each level.
std::vector<std::vector<double>> _f; //!< Right hand side vector on each level.
std::vector<std::vector<double>> _d; //!< Defect vector on each level.
std::vector<std::vector<double>> _w; //!< Correction vector on each level.
std::vector<BisectIntDirichlet> _Pc2f; //!< Interpolation to level from next coarser level.
};
void JacobiSolveMPI(ParMesh const &mesh, CRS_Matrix const &SK, std::vector<double> const &f, std::vector<double> &u);
#endif


@@ -0,0 +1,8 @@
%% calculate -Laplacian of a function
syms x y c ;
u = x*x*sin(2.5*pi*y)
f = simplify(-laplacian(u,[x,y]))
fsurf(u,[0,1,0,1])
xlabel("x");ylabel("y");


@@ -0,0 +1,93 @@
// MPI code in C++.
// See [Gropp/Lusk/Skjellum, "Using MPI", p.33/41 etc.]
// and /opt/mpich/include/mpi2c++/comm.h for details
#include "geom.h"
#include "getmatrix.h"
#include "jacsolve.h"
#include "par_geom.h"
#include "userset.h"
#include "vdop.h"
#include <cmath>
#include <iostream>
#include <mpi.h> // MPI
#include <omp.h> // OpenMP
using namespace std;
int main(int argc, char **argv)
{
MPI_Init(&argc, &argv);
MPI_Comm const icomm(MPI_COMM_WORLD);
//omp_set_num_threads(1); // don't use OMP parallelization for a start
//
{
int np;
MPI_Comm_size(icomm, &np);
//assert(4 == np); // example is only provided for 4 MPI processes
}
// #####################################################################
// ---- Read the f.e. mesh and the mapping of elements to MPI processes
ParMesh const mesh("square"); // Files square_4.m and square_4_sd.txt are needed
//ParMesh mesh(mesh_c, MPI_COMM_WORLD);
//ParMesh const mesh("square_bb");
//ParMesh const mesh("../generate_mesh/rec");
//ParMesh const mesh("../generate_mesh/rec_a");
int const numprocs = mesh.NumProcs();
int const myrank = mesh.MyRank();
if ( 0 == myrank )
{
cout << "\n There are " << numprocs << " processes running.\n \n";
}
int const check_rank = 0; // choose the MPI process whose mesh you would like to check
//if ( check_rank == myrank ) mesh.Debug();
//if ( check_rank == myrank ) mesh.DebugEdgeBased();
FEM_Matrix SK(mesh); // CRS matrix
//SK.Debug();
vector<double> uv(SK.Nrows(), 0.0); // temperature
vector<double> fv(SK.Nrows(), 0.0); // r.h.s.
mesh.VecAccu(uv); // Check MPI comm.
SK.CalculateLaplace(fv);
//SK.Debug();
//
mesh.SetValues(uv, [](double x, double y) -> double
{
return x * x * std::sin(2.5 * M_PI * y);
} );
////mesh.SetU(uv); // deprecated
//// Two ways to initialize the vector
////mesh.SetValues(uv,f_zero); // user function
////mesh.SetValues(uv, [](double x, double y) -> double {return 0.0*x*y;} ); // lambda function
////mesh.SetValues(uv, [](double x, double y) -> double {return 5e-3*(x+1)*(y+1);} ); // lambda function
////
//mesh.SetValues(uv, [](double x, double y) -> double {
//return x * x * std::sin(2.5 * M_PI * y);
//} );
SK.ApplyDirichletBC(uv, fv);
//SK.Debug();
double tstart = MPI_Wtime(); // Wall clock
JacobiSolve(SK, fv, uv ); // solve the system of equations
JacobiSolveMPI(mesh, SK, fv, uv ); // MPI: solve the system of equations
double t1 = MPI_Wtime() - tstart; // Wall clock
cout << "JacobiSolve: timing in sec. : " << t1 << endl;
//if (2==myrank || (1==numprocs && 0==myrank) ) mesh.Mesh::Visualize(uv); // Visualize only one subdomain
//mesh.Visualize(uv); // Visualize all subdomains
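// Optional check (sketch, not part of this commit): compare the computed
// solution with the nodal interpolant of u(x,y) = x^2*sin(2.5*pi*y), i.e. the
// function interpolated above; whether that function solves the assembled
// problem exactly depends on fNice() in userset.h.
//vector<double> uex(SK.Nrows(), 0.0);
//mesh.SetValues(uex, [](double x, double y) -> double
//{
//return x * x * std::sin(2.5 * M_PI * y);
//} );
//double maxdiff = 0.0;
//for (size_t k = 0; k < uex.size(); ++k)
//{
//double const d = std::abs(uex[k] - uv[k]);
//if (d > maxdiff) maxdiff = d;
//}
//if (0 == myrank) cout << "max. nodal difference to interpolant : " << maxdiff << endl;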
MPI_Finalize();
return 0;
}