par_geom.cpp
1// see: http://llvm.org/docs/CodingStandards.html#include-style
2#include "vdop.h"
3//#include "geom.h"
4#include "par_geom.h"
5
6#include <algorithm>
7#include <array>
8#include <cassert>
9#include <cmath>
10#include <ctime> // contains clock()
11#include <fstream>
12#include <iostream>
13#include <list>
14#include <numeric> // accumulate()
15#include <string>
16#include <vector>
17
18using namespace std;
19
20
21ParMesh::ParMesh(int ndim, int nvert_e, int ndof_e, int nedge_e, MPI_Comm const &icomm)
22 : Mesh(ndim, nvert_e, ndof_e, nedge_e),
23 _icomm(icomm), _numprocs(-1), _myrank(-1),
24 _v_l2g(0), _t_l2g(0), _v_g2l{{}}, _t_g2l{{}}, _valence(0),
25 _sendbuf(0), _sendcounts(0), _sdispls(0),
26 _loc_itf(0), _gloc_itf(0), _buf2loc(0)
27{
28 MPI_Comm_size(icomm, &_numprocs);
29 MPI_Comm_rank(icomm, &_myrank);
30}
31
32ParMesh::~ParMesh()
33{}
34
35
36
37ParMesh::ParMesh(std::string const &sname, MPI_Comm const &icomm)
38 : ParMesh(2, 3, 3, 3, icomm) // two dimensions, 3 vertices, 3 dofs, 3 edges per element
39{
40 //const int numprocs = _icomm.Get_size();
41 const string NS = "_" + to_string(_numprocs);
42 const string fname = sname + NS + ".txt";
43 //cout << "############ " << fname << endl;
44 ReadVertexBasedMesh(fname);
45 // ------------------------------------------------------------------------------
46 // Until this point all processes possess all mesh info in global numbering
47 //
48 // Now, we have to select the data belonging to my_rank
49 // and we have to create the mapping local to global (l2g) and vice versa (g2l)
50 // ------------------------------------------------------------------------------
51
52 // save the global node mesh (maybe we need it later)
53 DeriveEdgeFromVertexBased(); // and even more
54 Mesh global_mesh(*this); // requires a lot of memory
55 Del_EdgeConnectivity();
56
57 // read the subdomain info
58 const string dname = sname + NS + "_sd" + ".txt";
59 vector<int> t2d = ReadElementSubdomains(dname); // global mapping triangle to subdomain for all elements
60
61 //const int myrank = _icomm.Get_rank();
62 Transform_Local2Global_Vertex(_myrank, t2d); // Vertex based mesh: now in local indexing
63
64 DeriveEdgeFromVertexBased(); // Generate also the local edge based information
65
66 Generate_VectorAdd();
67
68
69 // Now we have to organize the MPI communication of vertices on the subdomain interfaces
70
71 return;
72}
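A minimal driver sketch for this constructor (illustration only, not part of par_geom.cpp): the mesh base name "square" and the presence of the matching "square_<numprocs>.txt" / "square_<numprocs>_sd.txt" files are assumptions.

// Example (illustration only, not part of par_geom.cpp)
#include <mpi.h>
#include <vector>
#include "par_geom.h"

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    {
        // reads "square_<numprocs>.txt" and "square_<numprocs>_sd.txt" (assumed file names)
        ParMesh const mesh("square", MPI_COMM_WORLD);

        std::vector<double> w(mesh.Nnodes(), 1.0);   // additively distributed nodal vector
        mesh.VecAccu(w);                             // accumulate values on interface vertices
    }                                                // destroy the mesh before MPI_Finalize
    MPI_Finalize();
    return 0;
}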
73
74vector<int> ParMesh::ReadElementSubdomains(string const &dname)
75{
76 ifstream ifs(dname);
77 if (!(ifs.is_open() && ifs.good())) {
78 cerr << "ParMesh::ReadElementSubdomains: Error: cannot open file " << dname << endl;
79 assert(ifs.is_open());
80 }
81
82 int const OFFSET{1}; // Matlab to C indexing
83 cout << "ASCII file " << dname << " opened" << endl;
84
85 // Read some mesh constants
86 int nelem;
87 ifs >> nelem;
88 cout << nelem << " " << Nelems() << endl;
89 assert( Nelems() == nelem);
90
91 // Allocate memory
92 vector<int> t2d(nelem, -1);
93 // Read element mapping
94 for (int k = 0; k < nelem; ++k) {
95 int tmp;
96 ifs >> tmp;
97 //t2d[k] = tmp - OFFSET;
98 // 2020-01-08
99 t2d[k] = min(tmp, NumProcs()) - OFFSET;
100 }
101
102 return t2d;
103}
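A small illustration (not part of the file) of the clamp in line 99: the file stores 1-based MATLAB subdomain numbers, and min(tmp, NumProcs()) - OFFSET maps them to 0-based MPI ranks while clamping values larger than the number of processes to the last rank.

// Example (illustration only): effect of the clamp for 4 MPI processes and OFFSET == 1
#include <algorithm>
#include <cassert>

int main()
{
    int const nprocs = 4;
    int const OFFSET = 1;
    int const file_value[] = {1, 2, 3, 4, 7};   // 1-based subdomain numbers from the MATLAB file
    int const expected[]   = {0, 1, 2, 3, 3};   // 0-based MPI ranks, clamped to nprocs-1
    for (int k = 0; k < 5; ++k)
        assert(std::min(file_value[k], nprocs) - OFFSET == expected[k]);
    return 0;
}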
104
105void ParMesh::Transform_Local2Global_Vertex(int const myrank, vector<int> const &t2d)
106{
107 // number of local elements
108 const int l_ne = count(t2d.cbegin(), t2d.cend(), myrank);
109 //cout << myrank << ":: " << lne << endl;
110 vector<int> l_ia(l_ne * NverticesElements(), -1); // local elements still with global vertex numbers
111 _t_l2g.resize(l_ne, -1);
112
113 int lk = 0;
114 for (size_t k = 0; k < t2d.size(); ++k) {
115 if (myrank == t2d[k]) {
116 //if (0==myrank)
117 //{
118 //cout << lk << " k " << t2d[k] << endl;
119 //}
120 l_ia[3 * lk ] = _ia[3 * k ];
121 l_ia[3 * lk + 1] = _ia[3 * k + 1];
122 l_ia[3 * lk + 2] = _ia[3 * k + 2]; // local elements still with global vertex numbers
123 _t_l2g[lk] = k; // elements: local to global mapping
124 _t_g2l[k] = lk; // global to local
125 ++lk;
126 }
127 }
128 // Checks:
129 assert( count(l_ia.cbegin(), l_ia.cend(), -1) == 0 );
130 assert( count(_t_l2g.cbegin(), _t_l2g.cend(), -1) == 0 );
131
132 // Vertices: local to global mapping
133 auto tmp = l_ia;
134 sort(tmp.begin(), tmp.end());
135 auto ip = unique(tmp.begin(), tmp.end());
136 tmp.erase(ip, tmp.end());
137 _v_l2g = tmp; // Vertices: local to global mapping
138 for (size_t lkv = 0; lkv < _v_l2g.size(); ++lkv) {
139 _v_g2l[_v_l2g[lkv]] = lkv; // global to local
140 }
141
142 // Boundary edges
143 vector<int> l_bedges;
144 vector<int> l_sdedges;
145 for (size_t b = 0; b < _bedges.size(); b += 2) {
146 int const v1 = _bedges[b ]; // global vertex numbers
147 int const v2 = _bedges[b + 1];
148 try {
149 int const lv1 = _v_g2l.at(v1); // map[] would add that element
150 int const lv2 = _v_g2l.at(v2); // but at() throws an exception
151 l_bedges.push_back(lv1);
152 l_bedges.push_back(lv2); // Boundaries: already in local indexing
153 // 2020-01-08
154 l_sdedges.push_back(_sdedges[b ]);
155 l_sdedges.push_back(_sdedges[b+1]);
156 }
157 catch (std::out_of_range &err) {
158 //cerr << ".";
159 }
160 }
161
162 // number of local vertices
163 const int l_nn = _v_l2g.size();
164 vector<double> l_xc(Ndims()*l_nn);
165 for (int lkk = 0; lkk < l_nn; ++lkk) {
166 int k = _v_l2g.at(lkk);
167 l_xc[2 * lkk ] = _xc[2 * k ];
168 l_xc[2 * lkk + 1] = _xc[2 * k + 1];
169 }
170
171
172 // Now, we represent the vertex mesh in local numbering
173 // elements
174
175 for (size_t i = 0; i < l_ia.size(); ++i) {
176 l_ia[i] = _v_g2l.at(l_ia[i]); // element vertices: global to local
177 }
178 SetNelem(l_ne);
179 _ia = l_ia;
180 // boundary
181 _bedges = l_bedges;
182 _sdedges = l_sdedges;
183 // coordinates
184 SetNnode(l_nn);
185 _xc = l_xc;
186
187 return;
188}
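The local-to-global vertex mapping above is built with the sort/unique idiom and then inverted into a map. A self-contained sketch of that idiom with illustrative values, detached from the Mesh class:

// Example (illustration only): sort/unique/invert, detached from the Mesh class
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <map>
#include <vector>

int main()
{
    // global vertex numbers of the local elements (with duplicates)
    std::vector<int> l_ia{7, 2, 9, 2, 9, 5};

    std::vector<int> v_l2g(l_ia);                      // local -> global
    std::sort(v_l2g.begin(), v_l2g.end());
    v_l2g.erase(std::unique(v_l2g.begin(), v_l2g.end()), v_l2g.end());
    // v_l2g == {2, 5, 7, 9}: local vertex lk carries global number v_l2g[lk]

    std::map<int, int> v_g2l;                          // global -> local
    for (std::size_t lk = 0; lk < v_l2g.size(); ++lk)
        v_g2l[v_l2g[lk]] = static_cast<int>(lk);

    // element connectivity rewritten in local numbering
    for (auto &gv : l_ia) gv = v_g2l.at(gv);
    assert((l_ia == std::vector<int>{2, 0, 3, 0, 3, 1}));
    return 0;
}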
189
190
191void ParMesh::Generate_VectorAdd()
192{
193 // Some checks
194 int lnn = Nnodes(); // local number of vertices
195 assert(static_cast<int>(_v_l2g.size()) == lnn);
196 int ierr{-12345};
197
198 // ---- Determine global largest vertex index
199 int gidx_max{-1}; // global largest vertex index
200 int lmax = *max_element(_v_l2g.cbegin(), _v_l2g.cend());
201 MPI_Allreduce(&lmax, &gidx_max, 1, MPI_INT, MPI_MAX, _icomm);
202 int gidx_min{-1}; // global smallest vertex index
203 int lmin = *min_element(_v_l2g.cbegin(), _v_l2g.cend());
204 MPI_Allreduce(&lmin, &gidx_min, 1, MPI_INT, MPI_MIN, _icomm);
205 //cout << gidx_min << " " << gidx_max << endl;
206 assert(0 == gidx_min); // global indices have to start with 0
207
208
209 // ---- Determine for each global vertex the number of subdomains it belongs to
210 vector<int> global(gidx_max+1, 0); // global scalar array for vertices
211 for (auto const gidx : _v_l2g) global[gidx] = 1;
212 // https://www.mpi-forum.org/docs/mpi-2.2/mpi22-report/node109.htm
213 ierr = MPI_Allreduce(MPI_IN_PLACE, global.data(), global.size(), MPI_INT, MPI_SUM, _icomm);
214 //if (0 == MyRank()) cout << global << endl;
215 //MPI_Barrier(_icomm);
216 //cout << _xc[2*_v_g2l.at(2)] << " , " << _xc[2*_v_g2l.at(2)+1] << endl;
217 //MPI_Barrier(_icomm);
218
219 // now, global[] contains the number of subdomains a global vertex belongs to
220 if ( count(global.cbegin(), global.cend(), 0) > 0 )
221 cerr << "\n !!! Non-continuous global vertex indexing !!!\n";
222
223 // ---- Determine local interface vertices ( <==> global[] > 1 )
224 // _loc_itf, neigh_itf
225 //vector<int> loc_itf; // local indices of interface vertices on this MPI process
226 for (size_t lk = 0; lk < _v_l2g.size(); ++lk) {
227 int const gk = _v_l2g[lk]; // global index of local vertex lk
228 if ( global[gk] > 1 ) {
229 _loc_itf.push_back(lk); // local indices of interface vertices on this MPI process
230 }
231 }
232
233 //MPI_Barrier(_icomm);
234 //if (0 == MyRank()) cout << "\n..._loc_itf...\n" << _loc_itf << "\n......\n";
235 //MPI_Barrier(_icomm);
236 // ---- global indices of local interface vertices
237 //auto gloc_itf(_loc_itf);
238 _gloc_itf = _loc_itf; // copy local interface indices, then map them to global numbers
239 for_each(_gloc_itf.begin(), _gloc_itf.end(), [this] (auto & v) -> void { v = _v_l2g[v];} );
240 //MPI_Barrier(_icomm);
241 //if (0 == MyRank()) cout << "\n..._gloc_itf...\n" << _gloc_itf << "\n......\n";
242 //DebugVector(_gloc_itf,"_gloc_itf");
243
244 // ---- Determine the global length of interfaces
245 vector<int> vnn(NumProcs(), -1); // number of interface vertices per MPI rank
246 int l_itf(_loc_itf.size()); // # local interface vertices
247 ierr = MPI_Allgather(&l_itf, 1, MPI_INT, vnn.data(), 1, MPI_INT, _icomm);
248 assert(0 == ierr);
249 //cout << vnn << endl;
250
251 // ---- Now we consider only the interface vertices
252 int snn = accumulate(vnn.cbegin(), vnn.cend(), 0); // required length of array for global interface indices
253 //cout << snn << " " << gnn << endl;
254 vector<int> dispnn(NumProcs(), 0) ; // displacement of interface vertices per MPI rank
255 partial_sum(vnn.cbegin(), vnn.cend() - 1, dispnn.begin() + 1);
256 //cout << dispnn << endl;
257
258 // ---- Get the global indices for all global interfaces
259 vector<int> g_itf(snn, -1); // collects all global indices of the global interfaces
260 // https://www.mpich.org/static//docs/v3.0.x/www3/MPI_Gatherv.html
261 ierr = MPI_Gatherv( _gloc_itf.data(), _gloc_itf.size(), MPI_INT,
262 g_itf.data(), vnn.data(), dispnn.data(), MPI_INT, 0, _icomm);
263 assert(0 == ierr);
264 // https://www.mpich.org/static/docs/v3.1/www3/MPI_Bcast.html
265 ierr = MPI_Bcast(g_itf.data(), g_itf.size(), MPI_INT, 0, _icomm);
266 assert(0 == ierr); // Now, each MPI rank has all global indices of the global interfaces
267 //MPI_Barrier(_icomm);
268 //if (MyRank() == 0) cout << "\n...g_itf...\n" << g_itf << "\n......\n";
269 //MPI_Barrier(_icomm);
270
271 // ----- Determine all MPI ranks a local interface vertex belongs to
272 vector<vector<int>> neigh_itf(_loc_itf.size());// subdomains a local interface vertex belongs to
273 for (size_t lk = 0; lk < _loc_itf.size(); ++lk) {
274 const int gvert = _gloc_itf[lk]; // global index of local interface node lk
275 for (int rank = 0; rank < NumProcs(); ++rank) {
276 auto const startl = g_itf.cbegin() + dispnn[rank];
277 auto const endl = startl + vnn[rank];
278 if ( find( startl, endl, gvert) != endl) {
279 neigh_itf[lk].push_back(rank);
280 }
281 }
282 }
283
284 // ---- check the available info in _loc_itf[lk], _gloc_itf[lk], neigh_itf[lk]
285 //MPI_Barrier(_icomm);
287 //if (MyRank() == 0) {
288 //for (size_t lk = 0; lk < _loc_itf.size(); ++lk ) {
289 //cout << lk << " : local idx " << _loc_itf[lk] << " , global idx " << _gloc_itf[lk];
290 //cout << " with MPI ranks " << neigh_itf[lk] << endl;
291 //}
292 //}
293 //MPI_Barrier(_icomm);
294
295 // ---- store the valence (i.e., the number of subdomains it belongs to) of all local vertices
296 _valence.resize(Nnodes(),1);
297 for (size_t lk = 0; lk < _loc_itf.size(); ++lk)
298 {
299 _valence[_loc_itf[lk]] = neigh_itf[lk].size();
300 }
301 //DebugVector(_valence,"_valence",_icomm);
302
303 // ---- We are going to use MPI_Alltoallv for data exchange on interfaces
304 // https://www.mpi-forum.org/docs/mpi-3.1/mpi31-report/node109.htm#Node109
305 // https://www.open-mpi.org/doc/v4.0/man3/MPI_Alltoallv.3.php
306 //int MPI_Alltoallv(const void* sendbuf, const int sendcounts[], const int sdispls[], MPI_Datatype sendtype, void* recvbuf, const int recvcounts[], const int rdispls[], MPI_Datatype recvtype, MPI_Comm comm)
307 //
308 // MPI_Alltoallv needs:
309 // vector<double> sendbuf (MPI_IN_PLACE: used also as recvbuf)
310 // vector<int> sendcounts (the same as for recv)
311 // vector<int> sdispls (the same as for recv)
312 //
313 // We need to map the interface vertices onto the sendbuffer:
314 // vector<int> loc_itf local index of interface vertex lk
315 // vector<int> gloc_itf global index of interface vertex lk
316 // vector<int> buf2loc local indices of sendbuffer positions (the same as for recv)
317
318 // ---- Determine sendcounts[] and sdipls[] from neigh_itf[]
319 //vector<int> _sendcounts(NumProcs(), 0);
320 _sendcounts.resize(NumProcs(), 0);
321 for (size_t lk = 0; lk < _loc_itf.size(); ++lk ) {
322 auto const &kneigh = neigh_itf[lk];
323 for (size_t ns = 0; ns < kneigh.size(); ++ns) {
324 ++_sendcounts[kneigh[ns]];
325 }
326 }
327 //if (MyRank() == 0) cout << "\n..._sendcounts ...\n" << _sendcounts << endl;
328
329 //vector<int> _sdispls(NumProcs(), 0);
330 _sdispls.resize(NumProcs(), 0);
331 partial_sum(_sendcounts.cbegin(), _sendcounts.cend() - 1, _sdispls.begin() + 1);
332 //vector<int> _sdispls(NumProcs()+1, 0);
333 //partial_sum(_sendcounts.cbegin(), _sendcounts.cend(), _sdispls.begin() + 1);
334 //if (MyRank() == 0) cout << "\n..._sdispls ...\n" << _sdispls << endl;
335
336 // ---- Determine size of buffer 'nbuffer' and mapping 'buf2loc'
337 int const nbuffer = accumulate(_sendcounts.cbegin(), _sendcounts.cend(), 0);
338 //vector<int> _buf2loc(nbuffer, -1);
339 _buf2loc.resize(nbuffer, -1);
340 int buf_idx = 0; // position in buffer
341 for (int rank = 0; rank < NumProcs(); ++rank) {
342 assert( buf_idx == _sdispls[rank]);
343 for (size_t lk = 0; lk < _loc_itf.size(); ++lk ) {
344 auto const &kneigh = neigh_itf[lk];
345 if (find(kneigh.cbegin(),kneigh.cend(),rank)!=kneigh.cend())
346 {
347 _buf2loc[buf_idx] = _loc_itf[lk];
348 ++buf_idx;
349 }
350 }
351 }
352 //if (MyRank() == 0) cout << "\n...buf2loc ...\n" << buf2loc << endl;
353 //DebugVector(buf2loc,"buf2loc",_icomm);
354
355 // ---- Allocate send/recv buffer
356 //vector<double> _sendbuf(nbuffer,-1.0);
357 _sendbuf.resize(nbuffer,-1.0);
358
358
359 assert(CheckInterfaceExchange_InPlace());
360 cout << " Check of data exchange (InPlace) successful!\n";
361 assert(CheckInterfaceExchange());
362 cout << " Check of data exchange successful!\n";
363 assert(CheckInterfaceAdd_InPlace());
364 cout << " Check of data add successful!\n";
365 assert(CheckInterfaceAdd());
366 cout << " Check of data add (InPlace) successful!\n";
367
368 vector<double> x(Nnodes(),-1.0);
369 VecAccu(x);
370 cout << " VecAccu (InPlace) successful!\n";
371
372
373 return;
374}
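A standalone sketch (made-up counts, not part of the class) of the bookkeeping assembled above: sendcounts per rank, sdispls as their exclusive prefix sum (lines 330-331), the buffer length as their sum (line 337), and buf2loc as the buffer-slot-to-local-vertex map used to pack the send buffer:

// Example (illustration only, made-up counts)
#include <cassert>
#include <numeric>
#include <vector>

int main()
{
    // values this rank exchanges with MPI ranks 0..3 (nothing shared with rank 0)
    std::vector<int> sendcounts{0, 2, 3, 1};

    // sdispls = exclusive prefix sum of sendcounts, as in lines 330-331
    std::vector<int> sdispls(sendcounts.size(), 0);
    std::partial_sum(sendcounts.cbegin(), sendcounts.cend() - 1, sdispls.begin() + 1);
    assert((sdispls == std::vector<int>{0, 0, 2, 5}));

    // buffer length = sum of sendcounts, as in line 337
    int const nbuffer = std::accumulate(sendcounts.cbegin(), sendcounts.cend(), 0);
    assert(nbuffer == 6);

    // buf2loc: buffer slot -> local vertex index; a vertex shared with two
    // neighbor ranks occupies one slot per rank (here local vertex 4)
    std::vector<int> buf2loc{4, 8, 4, 6, 7, 2};
    std::vector<double> u(9, 1.0);               // some local nodal vector
    std::vector<double> sendbuf(nbuffer);
    for (int ls = 0; ls < nbuffer; ++ls)
        sendbuf[ls] = u[buf2loc[ls]];            // pack before calling MPI_Alltoallv
    return 0;
}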
375
376bool ParMesh::CheckInterfaceExchange_InPlace() const
377{
378 vector<double> x(Nnodes(),-1.0);
379 copy(_v_l2g.cbegin(),_v_l2g.cend(),x.begin()); // init x with global vertex indices
380
381 for(size_t ls = 0; ls<_sendbuf.size(); ++ls)
382 {
383 _sendbuf[ls] = x[_buf2loc.at(ls)];
384 }
385 int ierr = MPI_Alltoallv(MPI_IN_PLACE, _sendcounts.data(), _sdispls.data(), MPI_DOUBLE,
386 _sendbuf.data(), _sendcounts.data(), _sdispls.data(), MPI_DOUBLE, _icomm);
387 assert(ierr==0);
388 //DebugVector(_sendbuf,"_sendbuf",_icomm);
389
390 vector<double> y(x);
391 for(size_t lk = 0; lk<_loc_itf.size(); ++lk) y[_loc_itf.at(lk)] = -1.0; // only for interface nodes
392 for(size_t ls = 0; ls<_sendbuf.size(); ++ls)
393 {
394 y[_buf2loc.at(ls)] = _sendbuf[ls];
395 }
396
397 double const eps=1e-10;
398 bool bv = equal(x.cbegin(),x.cend(),y.cbegin(),
399 [eps](double a, double b) -> bool
400 { return std::abs(a-b)<eps*(1.0+0.5*(std::abs(a)+ std::abs(b))); }
401 );
402 return bv;
403}
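The comparator used in the four Check* members mixes an absolute and a relative tolerance. The same test written as a free function, with two illustrative checks (the helper name nearly_equal is an assumption, not part of the code base):

// Example (illustration only): the tolerance test as a free function
#include <cassert>
#include <cmath>

bool nearly_equal(double a, double b, double eps = 1e-10)   // hypothetical helper
{
    // absolute tolerance eps for small values, relative tolerance for large ones
    return std::abs(a - b) < eps * (1.0 + 0.5 * (std::abs(a) + std::abs(b)));
}

int main()
{
    assert(  nearly_equal(1.0e6, 1.0e6 + 1.0e-6) );   // relative deviation 1e-12: accepted
    assert( !nearly_equal(1.0,   1.0   + 1.0e-6) );   // deviation well above eps: rejected
    return 0;
}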
404
405bool ParMesh::CheckInterfaceExchange() const
406{
407 vector<double> x(Nnodes(),-1.0);
408 copy(_v_l2g.cbegin(),_v_l2g.cend(),x.begin()); // init x with global vertex indices
409
410 for(size_t ls = 0; ls<_sendbuf.size(); ++ls)
411 {
412 _sendbuf[ls] = x[_buf2loc.at(ls)];
413 }
414 vector<double> recvbuf(_sendbuf.size());
415 int ierr = MPI_Alltoallv(_sendbuf.data(), _sendcounts.data(), _sdispls.data(), MPI_DOUBLE,
416 recvbuf.data(), _sendcounts.data(), _sdispls.data(), MPI_DOUBLE, _icomm);
417 //DebugVector(_sendbuf,"_sendbuf",_icomm);
418 //DebugVector(recvbuf,"recvbuf",_icomm);
419 assert(ierr==0);
420
421 vector<double> y(x);
422 for(size_t lk = 0; lk<_loc_itf.size(); ++lk) y[_loc_itf.at(lk)] = -1.0; // only for interface nodes
423 for(size_t ls = 0; ls<recvbuf.size(); ++ls)
424 {
425 y[_buf2loc.at(ls)] = recvbuf[ls];
426 }
427 //cout << "WRONG : " << count(y.cbegin(),y.cend(), -1.0) << endl;
428
429 double const eps=1e-10;
430 bool bv = equal(x.cbegin(),x.cend(),y.cbegin(),
431 [eps](double a, double b) -> bool
432 { return std::abs(a-b)<eps*(1.0+0.5*(std::abs(a)+ std::abs(b))); }
433 );
434 return bv;
435}
436
437bool ParMesh::CheckInterfaceAdd_InPlace() const
438{
439 vector<double> x(Nnodes(),-1.0);
440 for (size_t i=0; i<x.size(); ++i)
441 {
442 x[i] = _xc[2*i]+_xc[2*i+1]; // init x with coordinate values
443 }
444
445 for(size_t ls = 0; ls<_sendbuf.size(); ++ls)
446 {
447 _sendbuf[ls] = x[_buf2loc.at(ls)];
448 }
449 int ierr = MPI_Alltoallv(MPI_IN_PLACE, _sendcounts.data(), _sdispls.data(), MPI_DOUBLE,
450 _sendbuf.data(), _sendcounts.data(), _sdispls.data(), MPI_DOUBLE, _icomm);
451 assert(ierr==0);
452 //DebugVector(_sendbuf,"_sendbuf",_icomm);
453
454 vector<double> y(x);
455 for(size_t lk = 0; lk<_loc_itf.size(); ++lk) y[_loc_itf.at(lk)] = 0.0; // only for interface nodes
456 for(size_t ls = 0; ls<_sendbuf.size(); ++ls)
457 {
458 y[_buf2loc.at(ls)] += _sendbuf[ls];
459 }
460 MPI_Barrier(_icomm);
461 //DebugVector(x,"x",_icomm);
462 //DebugVector(y,"y",_icomm);
463 for (size_t i= 0; i<y.size(); ++i) y[i]/=_valence[i]; // divide by valence
464
465 double const eps=1e-10;
466 bool bv = equal(x.cbegin(),x.cend(),y.cbegin(),
467 [eps](double a, double b) -> bool
468 { return std::abs(a-b)<eps*(1.0+0.5*(std::abs(a)+ std::abs(b))); }
469 );
470 return bv;
471}
472
473bool ParMesh::CheckInterfaceAdd() const
474{
475 vector<double> x(Nnodes(),-1.0);
476 for (size_t i=0; i<x.size(); ++i)
477 {
478 //x[i] = _xc[2*i]+_xc[2*i+1]; // init x with coordinate values
479 x[i] = _v_l2g[i];
480 }
481
482 for(size_t ls = 0; ls<_sendbuf.size(); ++ls)
483 {
484 _sendbuf[ls] = x[_buf2loc.at(ls)];
485 }
486 vector<double> recvbuf(_sendbuf.size());
487 int ierr = MPI_Alltoallv(_sendbuf.data(), _sendcounts.data(), _sdispls.data(), MPI_DOUBLE,
488 recvbuf.data(), _sendcounts.data(), _sdispls.data(), MPI_DOUBLE, _icomm);
489 //DebugVector(_sendbuf,"_sendbuf",_icomm);
490 //DebugVector(recvbuf,"recvbuf",_icomm);
491 assert(ierr==0);
492
493 vector<double> y(x);
494 for(size_t lk = 0; lk<_loc_itf.size(); ++lk) y[_loc_itf.at(lk)] = 0.0; // only for interface nodes
495 for(size_t ls = 0; ls<recvbuf.size(); ++ls)
496 {
497 //if (0==MyRank()) cout << ls << ": " << _buf2loc.at(ls) << " " << y[_buf2loc.at(ls)] << "("<< x[_buf2loc.at(ls)] << ")" << " " << recvbuf[ls] << " (" << _sendbuf[ls] << ")" << endl;
498 y[_buf2loc.at(ls)] += recvbuf[ls];
499 }
500 MPI_Barrier(_icomm);
501 //DebugVector(x,"x",_icomm);
502 //DebugVector(y,"y",_icomm);
503 for (size_t i= 0; i<y.size(); ++i) y[i]/=_valence[i]; // divide by valence
504
505 double const eps=1e-10;
506 bool bv = equal(x.cbegin(),x.cend(),y.cbegin(),
507 [eps](double a, double b) -> bool
508 { return std::abs(a-b)<eps*(1.0+0.5*(std::abs(a)+ std::abs(b))); }
509 );
510 return bv;
511}
512
513
514// ----------
515
516void ParMesh::VecAccu(std::vector<double> &w) const
517{
518 for(size_t ls = 0; ls<_sendbuf.size(); ++ls)
519 {
520 _sendbuf[ls] = w[_buf2loc.at(ls)];
521 }
522 int ierr = MPI_Alltoallv(MPI_IN_PLACE, _sendcounts.data(), _sdispls.data(), MPI_DOUBLE,
523 _sendbuf.data(), _sendcounts.data(), _sdispls.data(), MPI_DOUBLE, _icomm);
524 assert(ierr==0);
525 //DebugVector(_sendbuf,"_sendbuf",_icomm);
526
527 for(size_t lk = 0; lk<_loc_itf.size(); ++lk) w[_loc_itf.at(lk)] = 0.0; // only for interface nodes
528 for(size_t ls = 0; ls<_sendbuf.size(); ++ls)
529 {
530 w[_buf2loc.at(ls)] += _sendbuf[ls];
531 }
532
533 return;
534}
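A typical use of VecAccu in a non-overlapping domain decomposition is the global scalar product of two additively distributed vectors: accumulate one of them, form the local dot product, and sum over all ranks. The sketch below is an assumption (names and the use of MPI_COMM_WORLD are illustrative, not the API of vdop.h):

// Example (illustration only): global scalar product of two additively
// distributed vectors; names and MPI_COMM_WORLD are assumptions.
#include <mpi.h>
#include <cstddef>
#include <vector>
#include "par_geom.h"

double global_scalar_product(ParMesh const &mesh,
                             std::vector<double> const &r_dist,  // additively distributed
                             std::vector<double> w_dist)         // additively distributed (by value)
{
    mesh.VecAccu(w_dist);                        // w is now accumulated on interface vertices

    // the local product of a distributed and an accumulated vector avoids
    // counting interface vertices more than once
    double s_local = 0.0;
    for (std::size_t k = 0; k < r_dist.size(); ++k)
        s_local += r_dist[k] * w_dist[k];

    double s_global = 0.0;
    MPI_Allreduce(&s_local, &s_global, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
    return s_global;
}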
535
536
537
538
539
540// ----------------------
541/*
542 manjaro> matlab
543 MATLAB is selecting SOFTWARE OPENGL rendering.
544 /usr/local/MATLAB/R2019a/bin/glnxa64/MATLAB: error while loading shared libraries: libcrypt.so.1: cannot open shared object file: No such file or directory
545
546 SOLUTION: sudo pacman -S libxcrypt-compat + reboot
547*/
548
549void ParMesh::Visualize(vector<double> const &v) const
550{
551 // define external command, but we have to pass the number of subdomains
552 string const MatlabScript{"visualize_par_results("+ to_string(_numprocs) + ")"};
553
554 // define the command to be executed
555 string const exec_m("matlab -nosplash -nodesktop -r '" + MatlabScript + "; quit'"); // Matlab
556 //string const exec_m("octave --no-window-system --no-gui '"+MatlabScript+"'"); // Octave, until version 6.3
557 //string const exec_m("octave --no-gui --eval '"+MatlabScript+"'"); // Octave since version 6.4
558 //string const exec_m("flatpak run org.octave.Octave --eval '"+MatlabScript+"'"); // Octave (flatpak): desktop GH
559
560 // old calls
561 //const string exec_m("matlab -nosplash -nodesktop -r 'try visualize_par_results("+ to_string(_numprocs) + "); catch; end; quit'"); // Matlab old
562 //const string exec_m("octave --no-window-system --no-gui visualize_par_results.m"); // Octave old
563
564 const string pre{"uv_"};
565 const string post{".txt"};
566 const string fname(pre + to_string(_myrank) + post);
567
568 if (0 == _myrank)
569 {
570 cout << exec_m << endl;
571 cout << fname << endl;
572 }
573
574 for (int p = 0; p <= NumProcs(); ++p) {
575 if (MyRank() == p) Write_ascii_matlab(fname, v);
576 MPI_Barrier(_icomm);
577 }
578
579 MPI_Barrier(_icomm);
580 if (0 == _myrank)
581 {
582 int ierror = system(exec_m.c_str()); // call external command
583 if (ierror != 0)
584 {
585 cout << endl << "Check path to Matlab/octave on your system" << endl;
586 }
587 cout << endl;
588 }
589 MPI_Barrier(_icomm);
590
591 return;
592}
std::vector< int > _sdedges
boundary edges [nbedges][2] with left/right subdomain number
Definition geom.h:434
std::vector< int > _ia
element connectivity
Definition geom.h:427
void DeriveEdgeFromVertexBased()
Definition geom.h:362
std::vector< double > _xc
coordinates
Definition geom.h:428
int Ndims() const
Definition geom.h:98
int Nelems() const
Definition geom.h:62
void Write_ascii_matlab(std::string const &fname, std::vector< double > const &v) const
Definition geom.cpp:123
int Nnodes() const
Definition geom.h:89
std::vector< int > _bedges
boundary edges [nbedges][2] storing start/end vertex
Definition geom.h:432
int NverticesElements() const
Definition geom.h:71
void SetNnode(int nnode)
Definition geom.h:333
void Del_EdgeConnectivity()
Definition geom.cpp:800
void ReadVertexBasedMesh(std::string const &fname)
Definition geom.cpp:712
void SetNelem(int nelem)
Definition geom.h:318
void Generate_VectorAdd()
Definition par_geom.cpp:191
void Visualize(std::vector< double > const &v) const override
Definition par_geom.cpp:549
std::vector< int > _v_l2g
vertices: local to global mapping
Definition par_geom.h:138
int _numprocs
number of MPI processes
Definition par_geom.h:136
MPI_Comm const _icomm
MPI communicator for the group of processes.
Definition par_geom.h:135
std::vector< int > _sendcounts
number of values to send to each MPI rank (the same as for recv)
Definition par_geom.h:148
bool CheckInterfaceAdd_InPlace() const
Definition par_geom.cpp:437
bool CheckInterfaceExchange() const
Definition par_geom.cpp:405
std::vector< int > _gloc_itf
global index of interface vertex lk
Definition par_geom.h:153
std::vector< int > _sdispls
offset of the data to send to each MPI rank w.r.t. _sendbuf (the same as for recv)
Definition par_geom.h:149
std::vector< int > ReadElementSubdomains(std::string const &dname)
Definition par_geom.cpp:74
std::map< int, int > _v_g2l
vertices: global to local mapping
Definition par_geom.h:140
bool CheckInterfaceExchange_InPlace() const
Definition par_geom.cpp:376
virtual ~ParMesh()
Definition par_geom.cpp:32
std::vector< double > _sendbuf
send buffer and receive buffer (MPI_IN_PLACE)
Definition par_geom.h:147
int _myrank
my MPI rank
Definition par_geom.h:137
std::vector< int > _loc_itf
local index of interface vertex lk
Definition par_geom.h:152
int MyRank() const
Definition par_geom.h:111
std::map< int, int > _t_g2l
triangles: global to local mapping
Definition par_geom.h:141
void VecAccu(std::vector< double > &w) const
Definition par_geom.cpp:516
ParMesh(int ndim, int nvert_e=0, int ndof_e=0, int nedge_e=0, MPI_Comm const &icomm=MPI_COMM_WORLD)
Definition par_geom.cpp:21
int NumProcs() const
Definition par_geom.h:120
std::vector< int > _t_l2g
triangles: local to global mapping
Definition par_geom.h:139
bool CheckInterfaceAdd() const
Definition par_geom.cpp:473
std::vector< int > _valence
valence of local vertices, i.e. number of subdomains they belong to
Definition par_geom.h:145
void Transform_Local2Global_Vertex(int myrank, std::vector< int > const &t2d)
Definition par_geom.cpp:105
std::vector< int > _buf2loc
local indices of sendbuffer positions (the same as for recv)
Definition par_geom.h:154