explicit ParMesh(int ndim, int nvert_e = 0, int ndof_e = 0, int nedge_e = 0, MPI_Comm const &icomm = MPI_COMM_WORLD);
explicit ParMesh(std::string const &sname, MPI_Comm const &icomm = MPI_COMM_WORLD);
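For orientation, a minimal usage sketch of the file-based constructor; the header name and the mesh file name are assumptions, not part of this interface:

#include <mpi.h>
#include <string>
#include "par_geom.h"   // header providing ParMesh; the name is assumed

int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);
    {
        // Each rank reads its subdomain mesh from a file (name hypothetical);
        // the communicator defaults to MPI_COMM_WORLD in both constructors.
        ParMesh const mesh(std::string("square_4.txt"));
    }   // destroy the mesh before MPI_Finalize()
    MPI_Finalize();
    return 0;
}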
void VecAccu(std::vector<double> &w) const;
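VecAccu accumulates a distributed vector: afterwards each entry of w belonging to an interface vertex holds the sum of the contributions from all subdomains sharing that vertex. The sketch below shows the standard pattern using the exchange members listed further down (_buf2loc, _sendcounts, _sdispls); it is an illustration under those assumptions, not the class's verbatim implementation:

#include <mpi.h>
#include <vector>

void VecAccu_sketch(std::vector<double> &w,
                    std::vector<int> const &buf2loc,     // cf. _buf2loc
                    std::vector<int> const &sendcounts,  // cf. _sendcounts
                    std::vector<int> const &sdispls,     // cf. _sdispls
                    MPI_Comm icomm)
{
    // 1. Gather the local values of all interface vertices into the send buffer.
    std::vector<double> sendbuf(buf2loc.size());
    for (std::size_t k = 0; k < sendbuf.size(); ++k)
        sendbuf[k] = w[buf2loc[k]];

    // 2. Exchange interface data; counts and offsets are identical for
    //    send and receive ("the same as for recv" below).
    std::vector<double> recvbuf(sendbuf.size());
    MPI_Alltoallv(sendbuf.data(), sendcounts.data(), sdispls.data(), MPI_DOUBLE,
                  recvbuf.data(), sendcounts.data(), sdispls.data(), MPI_DOUBLE,
                  icomm);

    // 3. Add the received remote contributions to the local values
    //    (assumes no rank sends to itself, i.e. sendcounts[myrank] == 0).
    for (std::size_t k = 0; k < recvbuf.size(); ++k)
        w[buf2loc[k]] += recvbuf[k];
}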
double dscapr(std::vector<double> const &x, std::vector<double> const &y) const;
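A parallel inner product is typically realized as a local partial sum followed by a global reduction; the sketch below shows that pattern. How entries of interface vertices shared by several subdomains must be weighted depends on whether the vectors are stored accumulated or distributed (see _valence); that handling is omitted here:

#include <mpi.h>
#include <vector>
#include <cassert>

// Sketch of the usual pattern behind dscapr/par_scalar:
// local partial sum, then MPI_Allreduce over the communicator.
double dscapr_sketch(std::vector<double> const &x, std::vector<double> const &y,
                     MPI_Comm const &icomm)
{
    assert(x.size() == y.size());
    double s_local = 0.0;
    for (std::size_t k = 0; k < x.size(); ++k)
        s_local += x[k] * y[k];

    double s_global = 0.0;
    MPI_Allreduce(&s_local, &s_global, 1, MPI_DOUBLE, MPI_SUM, icomm);
    return s_global;
}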
std::vector< int > ReadElementSubdomains(std::string const &dname)
std::vector< int > _sdispls
offset of data to send to each MPI rank w.r.t. _sendbuf (the same as for recv)
bool CheckInterfaceExchange() const
const MPI_Comm _icomm
MPI communicator for the group of processes.
bool CheckInterfaceExchange_InPlace() const
std::vector< int > _valence
valence of local vertices, i.e., the number of subdomains they belong to
std::vector< int > _sendcounts
number of data items to send to each MPI rank (the same as for recv)
double dscapr(std::vector< double > const &x, std::vector< double > const &y) const
std::map< int, int > _t_g2l
triangles: global to local mapping
MPI_Comm GetCommunicator() const
std::vector< int > _loc_itf
local index of interface vertex lk
std::vector< int > _t_l2g
triangles: local to global mapping
double par_scalar(vector< double > const &x, vector< double > const &y, MPI_Comm const &icomm)
std::map< int, int > _v_g2l
vertices: global to local mapping
std::vector< int > _gloc_itf
global index of interface vertex lk
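Presumably (an assumption about the invariant, not documented above) entry lk of _loc_itf and _gloc_itf describes the same interface vertex, once locally and once globally, tying the two arrays together via _v_l2g:

#include <vector>
#include <cassert>

// Check of the assumed invariant: gloc_itf[lk] is the global index of
// the vertex whose local index is loc_itf[lk].
void check_interface_arrays(std::vector<int> const &loc_itf,   // cf. _loc_itf
                            std::vector<int> const &gloc_itf,  // cf. _gloc_itf
                            std::vector<int> const &v_l2g)     // cf. _v_l2g
{
    assert(loc_itf.size() == gloc_itf.size());
    for (std::size_t lk = 0; lk < loc_itf.size(); ++lk)
        assert(gloc_itf[lk] == v_l2g[loc_itf[lk]]);
}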
bool CheckInterfaceAdd() const
int _numprocs
number of MPI processes
std::vector< int > _buf2loc
local indices of the send buffer positions (the same as for recv)
void VecAccu(std::vector< double > &w) const
std::vector< int > _v_l2g
vertices: local to global mapping
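The global-to-local and local-to-global mappings are inverses of each other; a minimal consistency check (illustrative only, likewise for _t_g2l/_t_l2g):

#include <map>
#include <vector>
#include <cassert>

// Round-trip check: v_g2l must invert v_l2g.
void check_vertex_mappings(std::vector<int> const &v_l2g,    // cf. _v_l2g
                           std::map<int, int> const &v_g2l)  // cf. _v_g2l
{
    for (std::size_t lk = 0; lk < v_l2g.size(); ++lk)
        assert(v_g2l.at(v_l2g[lk]) == static_cast<int>(lk));
}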
ParMesh & operator=(ParMesh const &)=delete
void Transform_Local2Global_Vertex(int myrank, std::vector< int > const &t2d)
void Generate_VectorAdd()
bool CheckInterfaceAdd_InPlace() const
ParMesh(int ndim, int nvert_e=0, int ndof_e=0, int nedge_e=0, MPI_Comm const &icomm=MPI_COMM_WORLD)
std::vector< double > _sendbuf
send buffer and receive buffer (MPI_IN_PLACE)
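The four exchange members fit together as a classic counts/offsets layout: the data destined for rank p occupies _sendbuf[_sdispls[p]] up to _sendbuf[_sdispls[p] + _sendcounts[p] - 1], and _buf2loc[k] names the local vertex stored at buffer position k. A sketch of the assumed prefix-sum relation, in line with MPI_Alltoallv conventions:

#include <vector>

// Build the offsets from the counts: sdispls[p] = sum of sendcounts[0..p-1].
std::vector<int> make_sdispls(std::vector<int> const &sendcounts)
{
    std::vector<int> sdispls(sendcounts.size(), 0);
    for (std::size_t p = 1; p < sendcounts.size(); ++p)
        sdispls[p] = sdispls[p - 1] + sendcounts[p - 1];
    return sdispls;
}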