accu.template
par_geom.cpp
1// see: http://llvm.org/docs/CodingStandards.html#include-style
2#include "vdop.h"
3//#include "geom.h"
4#include "par_geom.h"
5
6#include <algorithm>
7#include <array>
8#include <cassert>
9#include <cmath>
10#include <ctime> // contains clock()
11#include <fstream>
12#include <iostream>
13#include <list>
14#include <numeric> // accumulate()
15#include <string>
16#include <vector>
17
18using namespace std;
19
20
21ParMesh::ParMesh(int ndim, int nvert_e, int ndof_e, int nedge_e, MPI_Comm const &icomm)
22 : Mesh(ndim, nvert_e, ndof_e, nedge_e),
23 _icomm(icomm), _numprocs(-1), _myrank(-1),
24 _v_l2g(0), _t_l2g(0), _v_g2l{{}}, _t_g2l{{}}, _valence(0),
25 _sendbuf(0), _sendcounts(0), _sdispls(0),
26 _loc_itf(0), _gloc_itf(0), _buf2loc(0)
27{
28 MPI_Comm_size(icomm, &_numprocs);
29 MPI_Comm_rank(icomm, &_myrank);
30}
31
32ParMesh::~ParMesh()
33{}
34
35
36
37ParMesh::ParMesh(std::string const &sname, MPI_Comm const &icomm)
38 : ParMesh(2, 3, 3, 3, icomm) // two dimensions, 3 vertices, 3 dofs, 3 edges per element
39{
40 //const int numprocs = _icomm.Get_size();
41 const string NS = "_" + to_string(_numprocs);
42 const string fname = sname + NS + ".txt";
43 //cout << "############ " << fname << endl;
44 ReadVertexBasedMesh(fname);
45 cout << "\n End of sequential File read \n";
46 // ------------------------------------------------------------------------------
47 // Until this point a l l processes possess a l l mesh info in g l o b a l numbering
48 //
49 // Now, we have to select the data belonging to my_rank
50 // and we have to create the mapping local to global (l2g) and vice versa (g2l)
51 // ------------------------------------------------------------------------------
52
53 // save the global node mesh (maybe we need it later)
54 DeriveEdgeFromVertexBased(); // and even more
55 Mesh global_mesh(*this); // requires a l o t of memory
56 Del_EdgeConnectivity();
57
58 // read the subdomain info
59 const string dname = sname + NS + "_sd" + ".txt";
60 vector<int> t2d = ReadElementSubdomains(dname); // global mapping triangle to subdomain for all elements
61
62 //const int myrank = _icomm.Get_rank();
63 Transform_Local2Global_Vertex(_myrank, t2d); // Vertex based mesh: now in l o c a l indexing
64
65 DeriveEdgeFromVertexBased(); // Generate also the l o c a l edge based information
66
67 Generate_VectorAdd();
68
69
70 // Now we have to organize the MPI communication of vertices on the subdomain interfaces
71
72 return;
73}
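As a usage illustration only (a sketch, not part of par_geom.cpp): a hypothetical driver that builds the distributed mesh and accumulates a nodal vector. The basename "square" and the main() around it are assumptions; with 4 processes the constructor above would then read square_4.txt and square_4_sd.txt.

#include <mpi.h>
#include <vector>
#include "par_geom.h"

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    {
        ParMesh mesh("square", MPI_COMM_WORLD);     // hypothetical mesh basename

        std::vector<double> w(mesh.Nnodes(), 1.0);  // local nodal vector
        mesh.VecAccu(w);                            // interface entries now hold the sum of all subdomain contributions
    }                                               // destroy the mesh before MPI_Finalize
    MPI_Finalize();
    return 0;
}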
74
75vector<int> ParMesh::ReadElementSubdomains(string const &dname)
76{
77 ifstream ifs(dname);
78 if (!(ifs.is_open() && ifs.good())) {
79 cerr << "ParMesh::ReadElementSubdomains: Error cannot open file " << dname << endl;
80 assert(ifs.is_open());
81 }
82
83 int const OFFSET{1}; // Matlab to C indexing
84 cout << "ASCII file " << dname << " opened" << endl;
85
86 // Read some mesh constants
87 int nelem;
88 ifs >> nelem;
89 cout << nelem << " " << Nelems() << endl;
90 assert( Nelems() == nelem);
91
92 // Allocate memory
93 vector<int> t2d(nelem, -1);
94 // Read element mapping
95 for (int k = 0; k < nelem; ++k) {
96 int tmp;
97 ifs >> tmp;
98 //t2d[k] = tmp - OFFSET;
99 // 2020-01-08
100 t2d[k] = min(tmp, NumProcs()) - OFFSET;
101 }
102
103 return t2d;
104}
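The subdomain file read here (built as sname + NS + "_sd" + ".txt" in the constructor above) is plain ASCII: the number of elements first, followed by one subdomain number per element in Matlab-style 1-based counting (hence OFFSET). A hypothetical file for 8 elements distributed onto 4 processes could look like

8
1 1 2 2 3 3 4 4

The min(tmp, NumProcs()) above clamps subdomain numbers larger than the number of MPI processes, so such elements are assigned to the last rank.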
105
106void ParMesh::Transform_Local2Global_Vertex(int const myrank, vector<int> const &t2d)
107{
108 // number of local elements
109 const int l_ne = count(t2d.cbegin(), t2d.cend(), myrank);
110 //cout << myrank << ":: " << lne << endl;
111 vector<int> l_ia(l_ne * NverticesElements(), -1); // local elements still with global vertex numbers
112 _t_l2g.resize(l_ne, -1);
113
114 int lk = 0;
115 for (size_t k = 0; k < t2d.size(); ++k) {
116 if (myrank == t2d[k]) {
117 //if (0==myrank)
118 //{
119 //cout << lk << " k " << t2d[k] << endl;
120 //}
121 l_ia[3 * lk ] = _ia[3 * k ];
122 l_ia[3 * lk + 1] = _ia[3 * k + 1];
123 l_ia[3 * lk + 2] = _ia[3 * k + 2]; // local elements still with global vertex numbers
124 _t_l2g[lk] = k; // elements: local to global mapping
125 _t_g2l[k] = lk; // global to local
126 ++lk;
127 }
128 }
129 // Checks:
130 assert( count(l_ia.cbegin(), l_ia.cend(), -1) == 0 );
131 assert( count(_t_l2g.cbegin(), _t_l2g.cend(), -1) == 0 );
132
133 // Vertices: local to global mapping
134 auto tmp = l_ia;
135 sort(tmp.begin(), tmp.end());
136 auto ip = unique(tmp.begin(), tmp.end());
137 tmp.erase(ip, tmp.end());
138 _v_l2g = tmp; // Vertices: local to global mapping
139 for (size_t lkv = 0; lkv < _v_l2g.size(); ++lkv) {
140 _v_g2l[_v_l2g[lkv]] = lkv; // global to local
141 }
142
143 // Boundary edges
144 vector<int> l_bedges;
145 vector<int> l_sdedges;
146 for (size_t b = 0; b < _bedges.size(); b += 2) {
147 int const v1 = _bedges[b ]; // global vertex numbers
148 int const v2 = _bedges[b + 1];
149 try {
150 int const lv1 = _v_g2l.at(v1); // map[] would add that element
151 int const lv2 = _v_g2l.at(v2); // but at() throws an exception
152 l_bedges.push_back(lv1);
153 l_bedges.push_back(lv2); // Boundaries: already in local indexing
154 // 2020-01-08
155 l_sdedges.push_back(_sdedges[b ]);
156 l_sdedges.push_back(_sdedges[b+1]);
157 }
158 catch (std::out_of_range & err) {
159 //cerr << ".";
160 }
161 }
162
163 // number of local vertices
164 const int l_nn = _v_l2g.size();
165 vector<double> l_xc(Ndims()*l_nn);
166 for (int lkk = 0; lkk < l_nn; ++lkk) {
167 int k = _v_l2g.at(lkk);
168 l_xc[2 * lkk ] = _xc[2 * k ];
169 l_xc[2 * lkk + 1] = _xc[2 * k + 1];
170 }
171
172
173 // Now, we represent the vertex mesh in l o c a l numbering
174 // elements
175
176 for (size_t i = 0; i < l_ia.size(); ++i) {
177 l_ia[i] = _v_g2l.at(l_ia[i]); // element vertices: global to local
178 }
179 SetNelem(l_ne);
180 _ia = l_ia;
181 // boundary
182 _bedges = l_bedges;
183 _sdedges = l_sdedges;
184 // coordinates
185 SetNnode(l_nn);
186 _xc = l_xc;
187
188 return;
189}
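The vertex renumbering above boils down to the sort/unique/erase idiom plus an inverse std::map. A stand-alone sketch with made-up global vertex numbers (two triangles sharing an edge) shows what _v_l2g, _v_g2l and the rewritten connectivity look like; the data is purely illustrative.

#include <algorithm>
#include <iostream>
#include <map>
#include <vector>

int main()
{
    std::vector<int> l_ia{7, 2, 9,  2, 9, 11};   // two elements, still with global vertex numbers

    std::vector<int> v_l2g(l_ia);                // vertices: local -> global
    std::sort(v_l2g.begin(), v_l2g.end());
    v_l2g.erase(std::unique(v_l2g.begin(), v_l2g.end()), v_l2g.end());
    // v_l2g == {2, 7, 9, 11}: local vertex 0 is global 2, local 1 is global 7, ...

    std::map<int, int> v_g2l;                    // vertices: global -> local (inverse mapping)
    for (std::size_t lk = 0; lk < v_l2g.size(); ++lk) v_g2l[v_l2g[lk]] = lk;

    for (auto &v : l_ia) v = v_g2l.at(v);        // connectivity in local numbering: {1,0,2, 0,2,3}

    for (int v : l_ia) std::cout << v << ' ';
    std::cout << '\n';
    return 0;
}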
190
191
192void ParMesh::Generate_VectorAdd()
193{
194 // Some checks
195 int lnn = Nnodes(); // local number of vertices
196 assert(static_cast<int>(_v_l2g.size()) == lnn);
197 int ierr{-12345};
198
199 // ---- Determine global largest vertex index
200 int gidx_max{-1}; // global largest vertex index
201 int lmax = *max_element(_v_l2g.cbegin(), _v_l2g.cend());
202 MPI_Allreduce(&lmax, &gidx_max, 1, MPI_INT, MPI_MAX, _icomm);
203 int gidx_min{-1}; // global smallest vertex index
204 int lmin = *min_element(_v_l2g.cbegin(), _v_l2g.cend());
205 MPI_Allreduce(&lmin, &gidx_min, 1, MPI_INT, MPI_MIN, _icomm);
206 //cout << gidx_min << " " << gidx_max << endl;
207 assert(0 == gidx_min); // global indices have to start with 0
208
209
210 // ---- Determine for each global vertex the number of subdomains it belongs to
211 vector<int> global(gidx_max+1, 0); // global scalar array for vertices
212 for (auto const gidx : _v_l2g) global[gidx] = 1;
213 // https://www.mpi-forum.org/docs/mpi-2.2/mpi22-report/node109.htm
214 ierr = MPI_Allreduce(MPI_IN_PLACE, global.data(), global.size(), MPI_INT, MPI_SUM, _icomm);
215 //if (0 == MyRank()) cout << global << endl;
216 //MPI_Barrier(_icomm);
217 //cout << _xc[2*_v_g2l.at(2)] << " , " << _xc[2*_v_g2l.at(2)+1] << endl;
218 //MPI_Barrier(_icomm);
219
220 // now, global[] contains the number of subdomains a global vertex belongs to
221 if ( count(global.cbegin(), global.cend(), 0) > 0 )
222 cerr << "\n !!! Non-continuous global vertex indexing !!!\n";
223
224 // ---- Determine local interface vertices ( <==> global[] > 1 )
225 // _loc_itf, neigh_itf
226 //vector<int> loc_itf; // local indices of interface vertices on this MPI process
227 for (size_t lk = 0; lk < _v_l2g.size(); ++lk) {
228 int const gk = _v_l2g[lk]; // global index of local vertex lk
229 if ( global[gk] > 1 ) {
230 _loc_itf.push_back(lk); // local indices of interface vertices on this MPI process
231 }
232 }
233
234 //MPI_Barrier(_icomm);
235 //if (0 == MyRank()) cout << "\n..._loc_itf...\n" << _loc_itf << "\n......\n";
236 //MPI_Barrier(_icomm);
237 // ---- global indices of local interface vertices
238 //auto gloc_itf(_loc_itf);
239 _gloc_itf = _loc_itf; // will be mapped to global indices below
240 for_each(_gloc_itf.begin(), _gloc_itf.end(), [this] (auto & v) -> void { v = _v_l2g[v];} );
241 //MPI_Barrier(_icomm);
242 //if (0 == MyRank()) cout << "\n..._gloc_itf...\n" << _gloc_itf << "\n......\n";
243 //DebugVector(_gloc_itf,"_gloc_itf");
244
245 // ---- Determine the global length of interfaces
246 vector<int> vnn(NumProcs(), -1); // number of interface vertices per MPI rank
247 int l_itf(_loc_itf.size()); // # local interface vertices
248 ierr = MPI_Allgather(&l_itf, 1, MPI_INT, vnn.data(), 1, MPI_INT, _icomm);
249 assert(0 == ierr);
250 //cout << vnn << endl;
251
252 // ---- Now we consider only the interface vertices
253 int snn = accumulate(vnn.cbegin(), vnn.cend(), 0); // required length of array for global interface indices
254 //cout << snn << " " << gnn << endl;
255 vector<int> dispnn(NumProcs(), 0) ; // displacement of interface vertices per MPI rank
256 partial_sum(vnn.cbegin(), vnn.cend() - 1, dispnn.begin() + 1);
257 //cout << dispnn << endl;
258
259 // ---- Get the global indices for all global interfaces
260 vector<int> g_itf(snn, -1); // collects all global indices of the global interfaces
261 // https://www.mpich.org/static//docs/v3.0.x/www3/MPI_Gatherv.html
262 ierr = MPI_Gatherv( _gloc_itf.data(), _gloc_itf.size(), MPI_INT,
263 g_itf.data(), vnn.data(), dispnn.data(), MPI_INT, 0, _icomm);
264 assert(0 == ierr);
265 // https://www.mpich.org/static/docs/v3.1/www3/MPI_Bcast.html
266 ierr = MPI_Bcast(g_itf.data(), g_itf.size(), MPI_INT, 0, _icomm);
267 assert(0 == ierr); // Now, each MPI rank has all the global indices of the global interfaces
268 //MPI_Barrier(_icomm);
269 //if (MyRank() == 0) cout << "\n...g_itf...\n" << g_itf << "\n......\n";
270 //MPI_Barrier(_icomm);
271
272 // ----- Determine all MPI ranks a local interface vertex belongs to
273 vector<vector<int>> neigh_itf(_loc_itf.size());// subdomains a local interface vertex belongs to
274 for (size_t lk = 0; lk < _loc_itf.size(); ++lk) {
275 const int gvert = _gloc_itf[lk]; // global index of local interface node lk
276 for (int rank = 0; rank < NumProcs(); ++rank) {
277 auto const startl = g_itf.cbegin() + dispnn[rank];
278 auto const endl = startl + vnn[rank];
279 if ( find( startl, endl, gvert) != endl) {
280 neigh_itf[lk].push_back(rank);
281 }
282 }
283 }
284
285 // ---- check the available info in _loc_itf[lk], _gloc_itf[lk], neigh_itf[lk]
286 //MPI_Barrier(_icomm);
288 //if (MyRank() == 0) {
289 //for (size_t lk = 0; lk < _loc_itf.size(); ++lk ) {
290 //cout << lk << " : local idx " << _loc_itf[lk] << " , global idx " << _gloc_itf[lk];
291 //cout << " with MPI ranks " << neigh_itf[lk] << endl;
292 //}
293 //}
294 //MPI_Barrier(_icomm);
295
296 // ---- store the valence (i.e., the number of subdomains a vertex belongs to) of all local vertices
297 _valence.resize(Nnodes(),1);
298 for (size_t lk = 0; lk < _loc_itf.size(); ++lk)
299 {
300 _valence[_loc_itf[lk]] = neigh_itf[lk].size();
301 }
302 //DebugVector(_valence,"_valence",_icomm);
303
304 // ---- We are going to use MPI_Alltoallv for data exchange on interfaces
305 // https://www.mpi-forum.org/docs/mpi-3.1/mpi31-report/node109.htm#Node109
306 // https://www.open-mpi.org/doc/v4.0/man3/MPI_Alltoallv.3.php
307 //int MPI_Alltoallv(const void* sendbuf, const int sendcounts[], const int sdispls[], MPI_Datatype sendtype, void* recvbuf, const int recvcounts[], const int rdispls[], MPI_Datatype recvtype, MPI_Comm comm)
308 //
309 // MPI_Alltoallv needs:
310 // vector<double> sendbuf (MPI_IN_PLACE: used also as recvbuf)
311 // vector<int> sendcounts (the same as for recv)
312 // vector<int> sdispls (the same as for recv)
313 //
314 // We need to map the interface vertices onto the sendbuffer:
315 // vector<int> loc_itf local index of interface vertex lk
316 // vector<int> gloc_itf global index of interface vertex lk
317 // vector<int> buf2loc local indices of sendbuffer positions (the same as for recv)
318
319 // ---- Determine sendcounts[] and sdipls[] from neigh_itf[]
320 //vector<int> _sendcounts(NumProcs(), 0);
321 _sendcounts.resize(NumProcs(), 0);
322 for (size_t lk = 0; lk < _loc_itf.size(); ++lk ) {
323 auto const &kneigh = neigh_itf[lk];
324 for (size_t ns = 0; ns < kneigh.size(); ++ns) {
325 ++_sendcounts[kneigh[ns]];
326 }
327 }
328 //if (MyRank() == 0) cout << "\n..._sendcounts ...\n" << _sendcounts << endl;
329
330 //vector<int> _sdispls(NumProcs(), 0);
331 _sdispls.resize(NumProcs(), 0);
332 partial_sum(_sendcounts.cbegin(), _sendcounts.cend() - 1, _sdispls.begin() + 1);
333 //vector<int> _sdispls(NumProcs()+1, 0);
334 //partial_sum(_sendcounts.cbegin(), _sendcounts.cend(), _sdispls.begin() + 1);
335 //if (MyRank() == 0) cout << "\n..._sdispls ...\n" << _sdispls << endl;
336
337 // ---- Determine size of buffer 'nbuffer' and mapping 'buf2loc'
338 int const nbuffer = accumulate(_sendcounts.cbegin(), _sendcounts.cend(), 0);
339 //vector<int> _buf2loc(nbuffer, -1);
340 _buf2loc.resize(nbuffer, -1);
341 int buf_idx = 0; // position in buffer
342 for (int rank = 0; rank < NumProcs(); ++rank) {
343 assert( buf_idx == _sdispls[rank]);
344 for (size_t lk = 0; lk < _loc_itf.size(); ++lk ) {
345 auto const &kneigh = neigh_itf[lk];
346 if (find(kneigh.cbegin(),kneigh.cend(),rank)!=kneigh.cend())
347 {
348 _buf2loc[buf_idx] = _loc_itf[lk];
349 ++buf_idx;
350 }
351 }
352 }
353 //if (MyRank() == 0) cout << "\n...buf2loc ...\n" << buf2loc << endl;
354 //DebugVector(buf2loc,"buf2loc",_icomm);
355
356 // ---- Allocate send/recv buffer
357 //vector<double> _sendbuf(nbuffer,-1.0);
358 _sendbuf.resize(nbuffer,-1.0);
359
359
360 assert(CheckInterfaceExchange_InPlace());
361 cout << " Check of data exchange (InPlace) successful!\n";
362 assert(CheckInterfaceExchange());
363 cout << " Check of data exchange successful!\n";
364 assert(CheckInterfaceAdd_InPlace());
365 cout << " Check of data add (InPlace) successful!\n";
366 assert(CheckInterfaceAdd());
367 cout << " Check of data add successful!\n";
368
369 vector<double> x(Nnodes(),-1.0);
370 VecAccu(x);
371 cout << " VecAccu (InPlace) successful!\n";
372
373
374 return;
375}
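To see what Generate_VectorAdd produces, the following stand-alone sketch (no MPI needed) repeats the _sendcounts/_sdispls/_buf2loc construction for made-up interface data: one rank out of four holding three interface vertices, the middle one shared with two neighbour ranks. All numbers are hypothetical.

#include <algorithm>
#include <iostream>
#include <numeric>
#include <vector>

int main()
{
    int const numprocs = 4;
    std::vector<int>              loc_itf{5, 8, 13};            // local indices of the interface vertices
    std::vector<std::vector<int>> neigh_itf{{1}, {1, 2}, {2}};  // ranks each of them is shared with

    std::vector<int> sendcounts(numprocs, 0);
    for (auto const &kneigh : neigh_itf)
        for (int rank : kneigh) ++sendcounts[rank];             // sendcounts == {0, 2, 2, 0}

    std::vector<int> sdispls(numprocs, 0);
    std::partial_sum(sendcounts.cbegin(), sendcounts.cend() - 1, sdispls.begin() + 1);
    // sdispls == {0, 0, 2, 4}: the segment for rank 1 starts at 0, the one for rank 2 at 2

    int const nbuffer = std::accumulate(sendcounts.cbegin(), sendcounts.cend(), 0);
    std::vector<int> buf2loc(nbuffer, -1);
    int buf_idx = 0;
    for (int rank = 0; rank < numprocs; ++rank)
        for (std::size_t lk = 0; lk < loc_itf.size(); ++lk)
            if (std::find(neigh_itf[lk].cbegin(), neigh_itf[lk].cend(), rank) != neigh_itf[lk].cend())
                buf2loc[buf_idx++] = loc_itf[lk];
    // buf2loc == {5, 8, 8, 13}: vertex 8 is shared with ranks 1 and 2 and therefore
    // appears once in each of their buffer segments.

    for (int v : buf2loc) std::cout << v << ' ';
    std::cout << '\n';
    return 0;
}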
376
377bool ParMesh::CheckInterfaceExchange_InPlace() const
378{
379 vector<double> x(Nnodes(),-1.0);
380 copy(_v_l2g.cbegin(),_v_l2g.cend(),x.begin()); // init x with global vertex indices
381
382 for(size_t ls = 0; ls<_sendbuf.size(); ++ls)
383 {
384 _sendbuf[ls] = x[_buf2loc.at(ls)];
385 }
386 int ierr = MPI_Alltoallv(MPI_IN_PLACE, _sendcounts.data(), _sdispls.data(), MPI_DOUBLE,
387 _sendbuf.data(), _sendcounts.data(), _sdispls.data(), MPI_DOUBLE, _icomm);
388 assert(ierr==0);
389 //DebugVector(_sendbuf,"_sendbuf",_icomm);
390
391 vector<double> y(x);
392 for(size_t lk = 0; lk<_loc_itf.size(); ++lk) y[_loc_itf.at(lk)] = -1.0; // only for interface nodes
393 for(size_t ls = 0; ls<_sendbuf.size(); ++ls)
394 {
395 y[_buf2loc.at(ls)] = _sendbuf[ls];
396 }
397
398 double const eps=1e-10;
399 bool bv = equal(x.cbegin(),x.cend(),y.cbegin(),
400 [eps](double a, double b) -> bool
401 { return std::abs(a-b)<eps*(1.0+0.5*(std::abs(a)+ std::abs(b))); }
402 );
403 return bv;
404}
405
406bool ParMesh::CheckInterfaceExchange() const
407{
408 vector<double> x(Nnodes(),-1.0);
409 copy(_v_l2g.cbegin(),_v_l2g.cend(),x.begin()); // init x with global vertex indices
410
411 for(size_t ls = 0; ls<_sendbuf.size(); ++ls)
412 {
413 _sendbuf[ls] = x[_buf2loc.at(ls)];
414 }
415 vector<double> recvbuf(_sendbuf.size());
416 int ierr = MPI_Alltoallv(_sendbuf.data(), _sendcounts.data(), _sdispls.data(), MPI_DOUBLE,
417 recvbuf.data(), _sendcounts.data(), _sdispls.data(), MPI_DOUBLE, _icomm);
418 //DebugVector(_sendbuf,"_sendbuf",_icomm);
419 //DebugVector(recvbuf,"recvbuf",_icomm);
420 assert(ierr==0);
421
422 vector<double> y(x);
423 for(size_t lk = 0; lk<_loc_itf.size(); ++lk) y[_loc_itf.at(lk)] = -1.0; // only for interface nodes
424 for(size_t ls = 0; ls<recvbuf.size(); ++ls)
425 {
426 y[_buf2loc.at(ls)] = recvbuf[ls];
427 }
428 //cout << "WRONG : " << count(y.cbegin(),y.cend(), -1.0) << endl;
429
430 double const eps=1e-10;
431 bool bv = equal(x.cbegin(),x.cend(),y.cbegin(),
432 [eps](double a, double b) -> bool
433 { return std::abs(a-b)<eps*(1.0+0.5*(std::abs(a)+ std::abs(b))); }
434 );
435 return bv;
436}
437
438bool ParMesh::CheckInterfaceAdd_InPlace() const
439{
440 vector<double> x(Nnodes(),-1.0);
441 for (size_t i=0; i<x.size(); ++i)
442 {
443 x[i] = _xc[2*i]+_xc[2*i+1]; // init x with coordinate values
444 }
445
446 for(size_t ls = 0; ls<_sendbuf.size(); ++ls)
447 {
448 _sendbuf[ls] = x[_buf2loc.at(ls)];
449 }
450 int ierr = MPI_Alltoallv(MPI_IN_PLACE, _sendcounts.data(), _sdispls.data(), MPI_DOUBLE,
451 _sendbuf.data(), _sendcounts.data(), _sdispls.data(), MPI_DOUBLE, _icomm);
452 assert(ierr==0);
453 //DebugVector(_sendbuf,"_sendbuf",_icomm);
454
455 vector<double> y(x);
456 for(size_t lk = 0; lk<_loc_itf.size(); ++lk) y[_loc_itf.at(lk)] = 0.0; // only for interface nodes
457 for(size_t ls = 0; ls<_sendbuf.size(); ++ls)
458 {
459 y[_buf2loc.at(ls)] += _sendbuf[ls];
460 }
461 MPI_Barrier(_icomm);
462 //DebugVector(x,"x",_icomm);
463 //DebugVector(y,"y",_icomm);
464 for (size_t i= 0; i<y.size(); ++i) y[i]/=_valence[i]; // divide by valence
465
466 double const eps=1e-10;
467 bool bv = equal(x.cbegin(),x.cend(),y.cbegin(),
468 [eps](double a, double b) -> bool
469 { return std::abs(a-b)<eps*(1.0+0.5*(std::abs(a)+ std::abs(b))); }
470 );
471 return bv;
472}
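Both add-checks rely on the same identity: before the exchange every subdomain stores the identical value x[i] at a shared interface vertex, so adding the contributions of all its owners yields _valence[i]*x[i], and dividing by _valence[i] must reproduce x[i]. For a vertex shared by three subdomains with x[i] = 2.5 the accumulated value is 7.5, and 7.5/3 restores 2.5; non-interface vertices have valence 1 and are left untouched.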
473
474bool ParMesh::CheckInterfaceAdd() const
475{
476 vector<double> x(Nnodes(),-1.0);
477 for (size_t i=0; i<x.size(); ++i)
478 {
479 //x[i] = _xc[2*i]+_xc[2*i+1]; // init x with coordinate values
480 x[i] = _v_l2g[i];
481 }
482
483 for(size_t ls = 0; ls<_sendbuf.size(); ++ls)
484 {
485 _sendbuf[ls] = x[_buf2loc.at(ls)];
486 }
487 vector<double> recvbuf(_sendbuf.size());
488 int ierr = MPI_Alltoallv(_sendbuf.data(), _sendcounts.data(), _sdispls.data(), MPI_DOUBLE,
489 recvbuf.data(), _sendcounts.data(), _sdispls.data(), MPI_DOUBLE, _icomm);
490 //DebugVector(_sendbuf,"_sendbuf",_icomm);
491 //DebugVector(recvbuf,"recvbuf",_icomm);
492 assert(ierr==0);
493
494 vector<double> y(x);
495 for(size_t lk = 0; lk<_loc_itf.size(); ++lk) y[_loc_itf.at(lk)] = 0.0; // only for interface nodes
496 for(size_t ls = 0; ls<recvbuf.size(); ++ls)
497 {
498 //if (0==MyRank()) cout << ls << ": " << _buf2loc.at(ls) << " " << y[_buf2loc.at(ls)] << "("<< x[_buf2loc.at(ls)] << ")" << " " << recvbuf[ls] << " (" << _sendbuf[ls] << ")" << endl;
499 y[_buf2loc.at(ls)] += recvbuf[ls];
500 }
501 MPI_Barrier(_icomm);
502 //DebugVector(x,"x",_icomm);
503 //DebugVector(y,"y",_icomm);
504 for (size_t i= 0; i<y.size(); ++i) y[i]/=_valence[i]; // divide by valence
505
506 double const eps=1e-10;
507 bool bv = equal(x.cbegin(),x.cend(),y.cbegin(),
508 [eps](double a, double b) -> bool
509 { return std::abs(a-b)<eps*(1.0+0.5*(std::abs(a)+ std::abs(b))); }
510 );
511 return bv;
512}
513
514
515// ----------
516
517void ParMesh::VecAccu(std::vector<double> &w) const
518{
519 for(size_t ls = 0; ls<_sendbuf.size(); ++ls)
520 {
521 _sendbuf[ls] = w[_buf2loc.at(ls)];
522 }
523 int ierr = MPI_Alltoallv(MPI_IN_PLACE, _sendcounts.data(), _sdispls.data(), MPI_DOUBLE,
524 _sendbuf.data(), _sendcounts.data(), _sdispls.data(), MPI_DOUBLE, _icomm);
525 assert(ierr==0);
526 //DebugVector(_sendbuf,"_sendbuf",_icomm);
527
528 for(size_t lk = 0; lk<_loc_itf.size(); ++lk) w[_loc_itf.at(lk)] = 0.0; // only for interface nodes
529 for(size_t ls = 0; ls<_sendbuf.size(); ++ls)
530 {
531 w[_buf2loc.at(ls)] += _sendbuf[ls];
532 }
533
534 return;
535}
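A sketch of how VecAccu is typically combined with MPI_Allreduce to form a global scalar product of vectors stored in distributed form (interface entries hold only the local contribution). The function ParScalar and its calling convention are assumptions for illustration; only the ParMesh interface above is taken as given. Accumulating exactly one of the two factors ensures that every interface contribution enters the global sum exactly once.

#include <mpi.h>
#include <vector>
#include "par_geom.h"

// hypothetical helper: global scalar product of two distributed nodal vectors
double ParScalar(ParMesh const &mesh, std::vector<double> const &u_dist,
                 std::vector<double> const &v_dist, MPI_Comm const &icomm)
{
    std::vector<double> u_accu(u_dist); // copy, keep the distributed vector intact
    mesh.VecAccu(u_accu);               // interface entries now hold the full sums

    double s_loc = 0.0;
    for (std::size_t i = 0; i < u_accu.size(); ++i) s_loc += u_accu[i] * v_dist[i];

    double s_glob = 0.0;
    MPI_Allreduce(&s_loc, &s_glob, 1, MPI_DOUBLE, MPI_SUM, icomm);
    return s_glob;
}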
536
537
std::vector< int > _sdedges
boundary edges [nbedges][2] with left/right subdomain number
Definition geom.h:434
std::vector< int > _ia
element connectivity
Definition geom.h:427
void DeriveEdgeFromVertexBased()
Definition geom.h:362
std::vector< double > _xc
coordinates
Definition geom.h:428
int Ndims() const
Definition geom.h:98
int Nelems() const
Definition geom.h:62
int Nnodes() const
Definition geom.h:89
std::vector< int > _bedges
boundary edges [nbedges][2] storing start/end vertex
Definition geom.h:432
int NverticesElements() const
Definition geom.h:71
void SetNnode(int nnode)
Definition geom.h:333
void Del_EdgeConnectivity()
Definition geom.cpp:801
void ReadVertexBasedMesh(std::string const &fname)
Definition geom.cpp:713
void SetNelem(int nelem)
Definition geom.h:318
void Generate_VectorAdd()
Definition par_geom.cpp:192
std::vector< int > _v_l2g
vertices: local to global mapping
Definition par_geom.h:125
int _numprocs
number of MPI processes
Definition par_geom.h:123
MPI_Comm const _icomm
MPI communicator for the group of processes.
Definition par_geom.h:122
std::vector< int > _sendcounts
number of data to send to each MPI rank (the same as for recv)
Definition par_geom.h:135
bool CheckInterfaceAdd_InPlace() const
Definition par_geom.cpp:438
bool CheckInterfaceExchange() const
Definition par_geom.cpp:406
std::vector< int > _gloc_itf
global index of interface vertex lk
Definition par_geom.h:140
std::vector< int > _sdispls
offset of data to send to each MPI rank w.r.t. _sendbuf (the same as for recv)
Definition par_geom.h:136
std::vector< int > ReadElementSubdomains(std::string const &dname)
Definition par_geom.cpp:75
std::map< int, int > _v_g2l
vertices: global to local mapping
Definition par_geom.h:127
bool CheckInterfaceExchange_InPlace() const
Definition par_geom.cpp:377
virtual ~ParMesh()
Definition par_geom.cpp:32
std::vector< double > _sendbuf
send buffer a n d receiving buffer (MPI_IN_PLACE)
Definition par_geom.h:134
int _myrank
my MPI rank
Definition par_geom.h:124
std::vector< int > _loc_itf
local index of interface vertex lk
Definition par_geom.h:139
std::map< int, int > _t_g2l
triangles: global to local mapping
Definition par_geom.h:128
void VecAccu(std::vector< double > &w) const
Definition par_geom.cpp:517
ParMesh(int ndim, int nvert_e=0, int ndof_e=0, int nedge_e=0, MPI_Comm const &icomm=MPI_COMM_WORLD)
Definition par_geom.cpp:21
int NumProcs() const
Definition par_geom.h:107
std::vector< int > _t_l2g
triangles: local to global mapping
Definition par_geom.h:126
bool CheckInterfaceAdd() const
Definition par_geom.cpp:474
std::vector< int > _valence
valence of local vertices, i.e. number of subdomains they belong to
Definition par_geom.h:132
void Transform_Local2Global_Vertex(int myrank, std::vector< int > const &t2d)
Definition par_geom.cpp:106
std::vector< int > _buf2loc
local indices of sendbuffer positions (the same as for recv)
Definition par_geom.h:141