Zoltan2
multivectorTest.cpp
// @HEADER
// ***********************************************************************
// Copyright message goes here.
// ***********************************************************************
// @HEADER

// Create a Tpetra::MultiVector, and time the following:
// 1. Build a multivector with contiguous global ids.
// 2. Migrate.
// 3. Divide procs into two groups and build a new multivector
//    in each group with the non-contiguous global ids of the migrated data.
// 4. Migrate the new multivector in the subgroup.
//
// Repeat this using Epetra, and also Zoltan1.
//
// This mimics the recursive bisection algorithm in Zoltan2.
//
// Because global ordinals are "int" in this test, Zoltan1 must be
// configured with the cmake option ZOLTAN_ENABLE_UINT_IDS.

#include <Teuchos_DefaultComm.hpp>
#include <Teuchos_TimeMonitor.hpp>
#include <Tpetra_MultiVector.hpp>
#include <Tpetra_Import.hpp>

#include <Epetra_Vector.h>
#include <Epetra_MpiComm.h>
#include <Epetra_Import.h>

#include <zoltan.h>

#include <Zoltan2_Util.hpp>

#include <string>
#include <sstream>
#include <iostream>

using Teuchos::RCP;
using Teuchos::rcp;
using Teuchos::rcp_dynamic_cast;
using Teuchos::ArrayRCP;
using Teuchos::ArrayView;
using Teuchos::TimeMonitor;
using Teuchos::Time;

#ifdef HAVE_MPI

typedef int TPETRA_LO;
typedef int TPETRA_GO;
typedef unsigned int ZOLTAN_LO;
typedef unsigned int ZOLTAN_GO;
#define MPI_ZOLTAN_GO_TYPE MPI_UNSIGNED
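// ZOLTAN_GO is "unsigned int", so MPI_UNSIGNED is the matching MPI datatype
// for communicating Zoltan global ids (used in the MPI_Exscan call below).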
typedef double TPETRA_SCALAR;

RCP<Time> tmvBuild;
RCP<Time> tmvMigrate;
RCP<Time> tmvBuildN;
RCP<Time> tmvMigrateN;

RCP<Time> emvBuild;
RCP<Time> emvMigrate;
RCP<Time> emvBuildN;
RCP<Time> emvMigrateN;

RCP<Time> ztnBuild;
RCP<Time> ztnMigrate;
RCP<Time> ztnBuildN;
RCP<Time> ztnMigrateN;

using Teuchos::MpiComm;
using Teuchos::Comm;

enum testConstants {COORDDIM=3};
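// Coordinates are COORDDIM-dimensional, so each multivector in the tests
// below holds COORDDIM vectors (one per coordinate dimension).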

static void usage(char *arg[]){
  std::cout << "Usage:" << std::endl;
  std::cout << arg[0] << " {num coords}" << std::endl;
}
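// Example invocation (the executable name is illustrative only):
//   mpirun -np 4 ./multivectorTest 1000000
// The number of coordinates must be at least the number of processes.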

// Generate global Ids for different mappings
TPETRA_LO numSequentialGlobalIds(TPETRA_GO numGlobalCoords, int nprocs, int rank)
{
  TPETRA_LO share = numGlobalCoords / nprocs;
  TPETRA_LO extra = numGlobalCoords % nprocs;
  TPETRA_LO numLocalCoords = share;
  if (rank < extra)
    numLocalCoords++;

  return numLocalCoords;
}
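// For example, numGlobalCoords=10 on nprocs=4 gives local counts 3,3,2,2
// on ranks 0..3: the first "extra" ranks each get one additional coordinate.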
template <typename T>
void roundRobinGlobalIds(T numGlobalCoords, int nprocs, int rank,
  T *&gids)
{
  // Local number of coordinates does not change.
  T share = numGlobalCoords / nprocs;
  T extra = numGlobalCoords % nprocs;
  T numLocalCoords = share;
  if (static_cast<T> (rank) < extra)
    numLocalCoords++;

  gids = new T [numLocalCoords];
  if (!gids)
    throw std::bad_alloc();

  T next = 0;
  for (T i=rank; i < numGlobalCoords; i+=nprocs)
    gids[next++] = i;

  return;
}
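// For example, numGlobalCoords=10 on nprocs=4 yields
//   rank 0: {0,4,8}   rank 1: {1,5,9}   rank 2: {2,6}   rank 3: {3,7}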
template <typename T>
void subGroupGloballyIncreasingIds(T numGlobalCoords,
  int nprocs, int rank, T *&gids)
{
  int numProcsLeftHalf = nprocs / 2;
  T share = numGlobalCoords / nprocs;
  T extra = numGlobalCoords % nprocs;
  T numCoordsLeftHalf = 0;
  T firstIdx = 0, endIdx = 0;

  T endP = ((numProcsLeftHalf > rank) ? numProcsLeftHalf : rank);

  for (T p=0; p < endP ; p++){
    T numLocalCoords = share;
    if (p < extra)
      numLocalCoords++;

    if (p < static_cast<T> (rank))
      firstIdx += numLocalCoords;

    if (p < static_cast<T> (numProcsLeftHalf))
      numCoordsLeftHalf += numLocalCoords;
  }

  endIdx = firstIdx + share;
  if (rank < int(extra))
    endIdx++;

  if (rank >= numProcsLeftHalf){
    firstIdx -= numCoordsLeftHalf;
    endIdx -= numCoordsLeftHalf;
  }

  int firstProc=0, endProc=0;

  if (rank < numProcsLeftHalf){
    firstProc = 0;
    endProc = numProcsLeftHalf;
  }
  else{
    firstProc = numProcsLeftHalf;
    endProc = nprocs;
  }

  int numProcsInMyHalf = endProc - firstProc;

  // Picture the round robin global ids as a matrix where the
  // columns are the processes and row values are global ids.
  // Row values start at 0 in the upper left corner and increase
  // to nprocs-1 in the upper right corner. The values in
  // the second row are nprocs greater than the first row,
  // and so on.
  //
  // The processes were divided into two halves, represented
  // by a vertical line through the matrix dividing the
  // processes in the left half from the processes in the
  // right half.
  //
  // Now we want to enumerate the global ids in my half
  // in increasing order.

  T numLocalCoords = endIdx - firstIdx;
  gids = new T [numLocalCoords];
  if (!gids)
    throw std::bad_alloc();

  T firstRow = firstIdx / numProcsInMyHalf;
  T firstCol = (firstIdx % numProcsInMyHalf) + firstProc;
  int next = 0;

  for (T row = firstRow; static_cast<T> (next) < numLocalCoords; row++){
    T firstGid = row * nprocs + firstCol;
    for (T col = firstCol; static_cast<T> (next) < numLocalCoords && col < static_cast<T> (endProc); col++){
      gids[next++] = firstGid++;
    }
    firstCol = firstProc;
  }
}
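// Worked example: numGlobalCoords=10, nprocs=4. The round robin "matrix" is
//
//         p0 p1 | p2 p3
//   row 0  0  1 |  2  3
//   row 1  4  5 |  6  7
//   row 2  8  9 |
//
// The left half {p0,p1} owns {0,1,4,5,8,9}, enumerated in increasing order
// as rank 0 -> {0,1,4} and rank 1 -> {5,8,9}; the right half {p2,p3} owns
// {2,3,6,7}, enumerated as rank 2 -> {2,3} and rank 3 -> {6,7}.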

void timeEpetra(TPETRA_GO numGlobalCoords, const RCP<const MpiComm<int> > &comm, bool);
void
timeTpetra (const TPETRA_GO numGlobalCoords,
            const Teuchos::RCP<const Teuchos::Comm<int> >& comm,
            const bool doMemory);
void timeZoltan(ZOLTAN_GO numGlobalCoords, bool);

// Main

int main(int narg, char *arg[])
{
  Tpetra::ScopeGuard tscope(&narg, &arg);
  Teuchos::RCP<const Teuchos::Comm<int> > genComm = Tpetra::getDefaultComm();
  Teuchos::RCP<const MpiComm<int> > comm =
    rcp_dynamic_cast<const MpiComm<int> >(genComm);

  int rank = genComm->getRank();

  if (narg < 2){
    if (rank == 0)
      usage(arg);
    return 1;
  }

  TPETRA_GO numGlobalCoords = 0;
  std::string theArg(arg[1]);
  std::istringstream iss(theArg);
  iss >> numGlobalCoords;
  if (numGlobalCoords < genComm->getSize()){
    if (rank == 0)
      usage(arg);
    return 1;
  }

  if (rank == 0)
    std::cout << numGlobalCoords << " coordinates" << std::endl;

  tmvBuild = TimeMonitor::getNewTimer("CONSEC build Tpetra");
  tmvMigrate = TimeMonitor::getNewTimer("CONSEC migrate Tpetra");
  tmvBuildN = TimeMonitor::getNewTimer("!CONSEC build Tpetra");
  tmvMigrateN = TimeMonitor::getNewTimer("!CONSEC migrate Tpetra");

  ztnBuild = TimeMonitor::getNewTimer("CONSEC build Zoltan1");
  ztnMigrate = TimeMonitor::getNewTimer("CONSEC migrate Zoltan1");
  ztnBuildN = TimeMonitor::getNewTimer("!CONSEC build Zoltan1");
  ztnMigrateN = TimeMonitor::getNewTimer("!CONSEC migrate Zoltan1");

  emvBuild = TimeMonitor::getNewTimer("CONSEC build Epetra");
  emvMigrate = TimeMonitor::getNewTimer("CONSEC migrate Epetra");
  emvBuildN = TimeMonitor::getNewTimer("!CONSEC build Epetra");
  emvMigrateN = TimeMonitor::getNewTimer("!CONSEC migrate Epetra");

  TimeMonitor::zeroOutTimers();

  int ntests = 3;

  // Test with Zoltan_Comm and Zoltan_DataDirectory

  for (int i=0; i < ntests; i++){
    if (rank == 0)
      std::cout << "Zoltan test " << i+1 << std::endl;
    timeZoltan(numGlobalCoords, i==ntests-1);
  }

  // Test with Epetra_MultiVector

  for (int i=0; i < ntests; i++){
    if (rank == 0)
      std::cout << "Epetra test " << i+1 << std::endl;
    timeEpetra(numGlobalCoords, comm, i==ntests-1);
  }

  // Test with Tpetra::MultiVector

  for (int i=0; i < ntests; i++){
    if (rank == 0)
      std::cout << "Tpetra test " << i+1 << std::endl;
    timeTpetra(numGlobalCoords, comm, i==ntests-1);
  }

  // Report

  TimeMonitor::summarize();

  if (comm->getRank() == 0)
    std::cout << "PASS" << std::endl;
}

void
timeTpetra (const TPETRA_GO numGlobalCoords,
            const Teuchos::RCP<const Teuchos::Comm<int> >& comm,
            const bool doMemory)
{
  using Teuchos::arcp;
  using Teuchos::ArrayRCP;
  using Teuchos::ArrayView;
  using Teuchos::Comm;
  using Teuchos::RCP;
  using Teuchos::rcp;
  typedef Tpetra::Map<TPETRA_LO, TPETRA_GO> map_type;
  typedef Tpetra::MultiVector<TPETRA_SCALAR, TPETRA_LO, TPETRA_GO> MV;
  typedef ArrayView<const TPETRA_SCALAR> coordList_t;

  const int nprocs = comm->getSize ();
  const int rank = comm->getRank ();

  // Create a MV with contiguous global IDs

  const TPETRA_LO numLocalCoords = numSequentialGlobalIds(numGlobalCoords, nprocs, rank);

  RCP<const map_type> tmap;
  RCP<MV> mvector;
  TPETRA_SCALAR* coords = NULL;
  {
    Teuchos::TimeMonitor timeMon (*tmvBuild);

    tmap = rcp (new map_type (numGlobalCoords, numLocalCoords, 0, comm));

    coords = new TPETRA_SCALAR [COORDDIM * numLocalCoords];
    memset (coords, 0, sizeof(TPETRA_SCALAR) * numLocalCoords * COORDDIM);

    coordList_t *avList = new coordList_t [COORDDIM];
    TPETRA_LO offset = 0;

    for (int dim = 0; dim < COORDDIM; ++dim) {
      avList[dim] = coordList_t(coords + offset, numLocalCoords);
      offset += numLocalCoords;
    }

    ArrayRCP<const coordList_t> vectors = arcp (avList, 0, COORDDIM);
    mvector = rcp (new MV (tmap, vectors.view (0, COORDDIM), COORDDIM));
  }

  if (rank == 0 && doMemory) {
    const long nkb = Zoltan2::getProcessKilobytes ();
    std::cout << "Create mvector 1: " << nkb << std::endl;
  }

  // Migrate the MV.

  ArrayRCP<const TPETRA_GO> newGidArray;
  {
    TPETRA_GO *newGids = NULL;
    roundRobinGlobalIds<TPETRA_GO> (numGlobalCoords, nprocs, rank, newGids);
    newGidArray = arcp<const TPETRA_GO> (newGids, 0, numLocalCoords, true);
  }
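  // The ArrayRCP above was created with owns_mem=true, so it takes ownership
  // of newGids and frees the array when the last reference is released.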

  RCP<const map_type> newTmap;
  RCP<Tpetra::Import<TPETRA_LO, TPETRA_GO> > importer;
  RCP<MV> newMvector;
  {
    Teuchos::TimeMonitor timeMon (*tmvMigrate);

    newTmap = rcp (new map_type (numGlobalCoords, newGidArray.view(0, numLocalCoords), 0, comm));
    importer = rcp (new Tpetra::Import<TPETRA_LO, TPETRA_GO> (tmap, newTmap));
    newMvector = rcp (new MV (newTmap, COORDDIM, true));

    newMvector->doImport (*mvector, *importer, Tpetra::INSERT);
    mvector = newMvector;
  }

  delete [] coords;

  if (rank == 0 && doMemory) {
    const long nkb = Zoltan2::getProcessKilobytes ();
    std::cout << "Create mvector 2: " << nkb << std::endl;
  }

  // Divide processes into two halves.

  RCP<Comm<int> > subComm;
  {
    int groupSize = 0;
    int leftHalfNumProcs = nprocs / 2;
    int *myHalfProcs = NULL;

    if (rank < leftHalfNumProcs){
      groupSize = leftHalfNumProcs;
      myHalfProcs = new int [groupSize];
      for (int i=0; i < groupSize; i++)
        myHalfProcs[i] = i;
    }
    else {
      groupSize = nprocs - leftHalfNumProcs;
      myHalfProcs = new int [groupSize];
      int firstNum = leftHalfNumProcs;
      for (int i=0; i < groupSize; i++)
        myHalfProcs[i] = firstNum++;
    }

    ArrayView<const int> idView(myHalfProcs, groupSize);
    subComm = comm->createSubcommunicator (idView);
    delete [] myHalfProcs;
  }

  // Divide the multivector into two. Each process group is creating
  // a multivector with non-contiguous global ids. For one group,
  // base gid is not 0.

  size_t globalSize = Teuchos::OrdinalTraits<size_t>::invalid ();
  RCP<map_type> subMap;
  RCP<MV> subMvector;
  {
    Teuchos::TimeMonitor timeMon (*tmvBuildN);

    ArrayView<const TPETRA_GO> gidList = mvector->getMap ()->getLocalElementList ();
    subMap = rcp (new map_type (globalSize, gidList, 0, subComm));
    globalSize = subMap->getGlobalNumElements ();

    // Get a view of the block of rows to copy.
    RCP<MV> tmp = mvector->offsetViewNonConst (subMap, 0);
    // Create a new multivector to hold the group's rows.
    subMvector = rcp (new MV (subMap, mvector->getNumVectors ()));
    // Copy the view into the new multivector.
    Tpetra::deep_copy (*subMvector, *tmp);
  }

  // Each subgroup migrates the sub-multivector so the
  // global Ids are increasing with process rank.

  TPETRA_GO *increasingGids = NULL;
  subGroupGloballyIncreasingIds<TPETRA_GO> (numGlobalCoords, nprocs,
                                            rank, increasingGids);

  ArrayRCP<const TPETRA_GO> incrGidArray (increasingGids, 0, numLocalCoords, true);

  RCP<const map_type> newSubMap;
  RCP<Tpetra::Import<TPETRA_LO, TPETRA_GO> > subImporter;
  RCP<MV> newSubMvector;
  {
    Teuchos::TimeMonitor timeMon (*tmvMigrateN);

    newSubMap =
      rcp (new map_type (globalSize, incrGidArray.view (0, numLocalCoords),
                         0, subComm));
    subImporter = rcp (new Tpetra::Import<TPETRA_LO, TPETRA_GO> (subMap, newSubMap));
    newSubMvector = rcp (new MV (newSubMap, COORDDIM, true));
    newSubMvector->doImport (*subMvector, *subImporter, Tpetra::INSERT);
    mvector = newSubMvector;
  }
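
  // Everything created in this function is reference counted (RCP/ArrayRCP),
  // so the maps, importers, and multivectors are released automatically when
  // the function returns.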
}

void timeEpetra(TPETRA_GO numGlobalCoords, const RCP<const MpiComm<int> > &comm,
  bool doMemory)
{
  RCP<const Teuchos::OpaqueWrapper<MPI_Comm> > commPtr =
    comm->getRawMpiComm();

  RCP<Epetra_MpiComm> ecomm = rcp(new Epetra_MpiComm((*commPtr)()));

  int nprocs = comm->getSize();
  int rank = comm->getRank();

  // Create a MV with contiguous global IDs

  TPETRA_LO numLocalCoords = numSequentialGlobalIds(numGlobalCoords, nprocs, rank);

  emvBuild->start();
  emvBuild->incrementNumCalls();

  RCP<Epetra_BlockMap> emap = rcp(new Epetra_BlockMap(numGlobalCoords,
    numLocalCoords, 1, 0, *ecomm));

  TPETRA_SCALAR *coords = new TPETRA_SCALAR [COORDDIM * numLocalCoords];
  memset(coords, 0, sizeof(TPETRA_SCALAR) * numLocalCoords * COORDDIM);

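  // Epetra_DataAccess "View" makes the multivector wrap the coords buffer
  // without copying it, so the buffer must outlive this view of it.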
  RCP<Epetra_MultiVector> mvector = rcp(new Epetra_MultiVector(
    View, *emap, coords, 1, COORDDIM));

  emvBuild->stop();

  if (rank==0 && doMemory){
    long nkb = Zoltan2::getProcessKilobytes();
    std::cout << "Create mvector 1: " << nkb << std::endl;
  }

  // Migrate the MV.

  TPETRA_GO *newGids = NULL;
  roundRobinGlobalIds<TPETRA_GO>(numGlobalCoords, nprocs, rank, newGids);

  emvMigrate->start();
  emvMigrate->incrementNumCalls();

  RCP<Epetra_BlockMap> newMap = rcp(new Epetra_BlockMap(numGlobalCoords,
    numLocalCoords, newGids, 1, 0, *ecomm));

  RCP<Epetra_Import> importer = rcp(new Epetra_Import(*newMap, *emap));

  RCP<Epetra_MultiVector> newMvector = rcp(new Epetra_MultiVector(
    *newMap, COORDDIM));

  newMvector->Import(*mvector, *importer, Insert);

  mvector = newMvector;

  emvMigrate->stop();

  delete [] coords;

  if (rank==0 && doMemory){
    long nkb = Zoltan2::getProcessKilobytes();
    std::cout << "Create mvector 2: " << nkb << std::endl;
  }

  // Divide processes into two halves.

  int groupSize = 0;
  int leftHalfNumProcs = nprocs / 2;
  int *myHalfProcs = NULL;

  if (rank < leftHalfNumProcs){
    groupSize = leftHalfNumProcs;
    myHalfProcs = new int [groupSize];
    for (int i=0; i < groupSize; i++)
      myHalfProcs[i] = i;
  }
  else {
    groupSize = nprocs - leftHalfNumProcs;
    myHalfProcs = new int [groupSize];
    int firstNum = leftHalfNumProcs;
    for (int i=0; i < groupSize; i++)
      myHalfProcs[i] = firstNum++;
  }

  ArrayView<const int> idView(myHalfProcs, groupSize);
  // TODO - memory leak in createSubcommunicator.
  RCP<Comm<int> > newComm = comm->createSubcommunicator(idView);
  RCP<MpiComm<int> > genSubComm = rcp_dynamic_cast<MpiComm<int> >(newComm);

  commPtr = genSubComm->getRawMpiComm();

  RCP<Epetra_MpiComm> subComm = rcp(new Epetra_MpiComm((*commPtr)()));

  delete [] myHalfProcs;

  // Divide the multivector into two. Each process group is creating
  // a multivector with non-contiguous global ids. For one group,
  // base gid is not 0.

  emvBuildN->start();
  emvBuildN->incrementNumCalls();

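  // Passing -1 as the number of global elements lets Epetra compute the
  // global count by summing numLocalCoords over the subgroup communicator.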
  RCP<Epetra_BlockMap> subMap = rcp(new Epetra_BlockMap(-1,
    numLocalCoords, newGids, 1, 0, *subComm));

  TPETRA_SCALAR **avSubList = new TPETRA_SCALAR * [COORDDIM];

  for (int dim=0; dim < COORDDIM; dim++)
    (*mvector)(dim)->ExtractView(avSubList + dim);

  RCP<Epetra_MultiVector> subMvector = rcp(new Epetra_MultiVector(
    View, *subMap, avSubList, COORDDIM));

  mvector = subMvector;

  delete [] avSubList;
  delete [] newGids;

  emvBuildN->stop();

  // Each subgroup migrates the sub-multivector so the
  // global Ids are increasing with process rank.

  TPETRA_GO *increasingGids = NULL;
  subGroupGloballyIncreasingIds<TPETRA_GO>(numGlobalCoords, nprocs, rank,
    increasingGids);

  emvMigrateN->start();
  emvMigrateN->incrementNumCalls();

  RCP<Epetra_BlockMap> newSubMap = rcp(new Epetra_BlockMap(-1,
    numLocalCoords, increasingGids, 1, 0, *subComm));

  RCP<Epetra_Import> subImporter = rcp(new Epetra_Import(
    *subMap, *newSubMap));

  RCP<Epetra_MultiVector> newSubMvector = rcp(new Epetra_MultiVector(
    *newSubMap, COORDDIM));

  newSubMvector->Import(*subMvector, *subImporter, Insert);

  mvector = newSubMvector;

  emvMigrateN->stop();

  delete [] increasingGids;
}

void timeZoltan(ZOLTAN_GO numGlobalCoords,
  bool doMemory)
{
  int nprocs, rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

  // Create a global data directory with contiguous global IDs.
  // (We don't need this, but it is analogous to a Tpetra::Map.)

  ZOLTAN_LO numLocalCoords = numSequentialGlobalIds(numGlobalCoords, nprocs, rank);

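  // An exclusive prefix sum of the local counts gives each rank the first
  // global id of its contiguous block. Note: MPI_Exscan leaves the receive
  // buffer undefined on rank 0, which is why offset is pre-set to 0.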
  ZOLTAN_GO offset=0;
  MPI_Exscan(&numLocalCoords, &offset, 1,
    MPI_ZOLTAN_GO_TYPE, MPI_SUM, MPI_COMM_WORLD);

  ZOLTAN_GO *gids = new ZOLTAN_GO [numLocalCoords];
  for (ZOLTAN_LO i=0; i < numLocalCoords; i++){
    gids[i] = offset++;
  }

  ztnBuild->start();
  ztnBuild->incrementNumCalls();

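  // Build a Zoltan distributed data directory over the contiguous ids
  // (roughly: 1 gid entry and 1 lid entry per object, no user data, a table
  // size hint of numLocalCoords, and debug level 0).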
  struct Zoltan_DD_Struct *dd = NULL;
  int rc = Zoltan_DD_Create(&dd, MPI_COMM_WORLD, 1, 1, 0, numLocalCoords, 0);
  if (rc != ZOLTAN_OK)
    exit(1);

  rc = Zoltan_DD_Update(dd, gids, NULL, NULL, NULL, numLocalCoords);

  // Create an array of coordinates associated with the global Ids.

  TPETRA_SCALAR *coords = new TPETRA_SCALAR [COORDDIM * numLocalCoords];
  memset(coords, 0, sizeof(TPETRA_SCALAR) * numLocalCoords * COORDDIM);

  ztnBuild->stop();

  if (rank==0 && doMemory){
    long nkb = Zoltan2::getProcessKilobytes();
    std::cout << "Create mvector 1: " << nkb << std::endl;
  }

  Zoltan_DD_Destroy(&dd);

  // Migrate the array of coordinates.

  ZOLTAN_GO *newGids = NULL;
  roundRobinGlobalIds<ZOLTAN_GO>(numGlobalCoords, nprocs, rank, newGids);

  ztnMigrate->start();
  ztnMigrate->incrementNumCalls();

  struct Zoltan_DD_Struct *ddNew = NULL;   // new "map"
  rc = Zoltan_DD_Create(&ddNew, MPI_COMM_WORLD, 1, 1, 0, numLocalCoords, 0);
  if (rc != ZOLTAN_OK)
    exit(1);

  rc = Zoltan_DD_Update(ddNew, newGids, NULL, NULL, NULL, numLocalCoords);
  if (rc != ZOLTAN_OK)
    exit(1);

  int *procOwners = new int [numLocalCoords];   // procs to get my data
  rc = Zoltan_DD_Find(ddNew, gids, NULL, NULL, NULL,
    numLocalCoords, procOwners);
  if (rc != ZOLTAN_OK)
    exit(1);

  Zoltan_DD_Destroy(&ddNew);

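  // Zoltan_Comm_Create builds a communication plan from the destination
  // process of each local coordinate (procOwners) and reports in numReceive
  // how many items this rank will receive.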
  struct Zoltan_Comm_Obj *commPlan = NULL;   // global communication plan
  int tag = 10000;
  int numReceive = 0;

  rc = Zoltan_Comm_Create(&commPlan, numLocalCoords, procOwners, MPI_COMM_WORLD,
    tag, &numReceive);
  if (rc != ZOLTAN_OK)
    exit(1);

  TPETRA_SCALAR *newCoords = new TPETRA_SCALAR [COORDDIM * numReceive];

  tag = 11000;

  // To prevent compile warnings or errors
  void *x = static_cast<void *>(coords);
  char *charCoords = static_cast<char *>(x);
  x = static_cast<void *>(newCoords);
  char *charNewCoords = static_cast<char *>(x);

  rc = Zoltan_Comm_Do(commPlan, tag, charCoords,
    sizeof(TPETRA_SCALAR)*COORDDIM, charNewCoords);

  if (rc != ZOLTAN_OK)
    exit(1);

  ztnMigrate->stop();

  Zoltan_Comm_Destroy(&commPlan);
  delete [] coords;
  delete [] gids;

  if (rank==0 && doMemory){
    long nkb = Zoltan2::getProcessKilobytes();
    std::cout << "Create mvector 2: " << nkb << std::endl;
  }

  // Divide processes into two halves.

  int groupSize = 0;
  int leftHalfNumProcs = nprocs / 2;
  int *myHalfProcs = NULL;

  if (rank < leftHalfNumProcs){
    groupSize = leftHalfNumProcs;
    myHalfProcs = new int [groupSize];
    for (int i=0; i < groupSize; i++)
      myHalfProcs[i] = i;
  }
  else {
    groupSize = nprocs - leftHalfNumProcs;
    myHalfProcs = new int [groupSize];
    for (int i=0; i < groupSize; i++)
      myHalfProcs[i] = i + leftHalfNumProcs;
  }

  MPI_Group group, subGroup;
  MPI_Comm subComm;

  MPI_Comm_group(MPI_COMM_WORLD, &group);
  MPI_Group_incl(group, groupSize, myHalfProcs, &subGroup);
  MPI_Comm_create(MPI_COMM_WORLD, subGroup, &subComm);
  MPI_Group_free(&subGroup);

  delete [] myHalfProcs;

  // Create global data directories for our sub groups.
  // (Analogous to creating the new MultiVectors in Tpetra.)

  ztnBuildN->start();
  ztnBuildN->incrementNumCalls();

  struct Zoltan_DD_Struct *ddSub = NULL;   // subgroup "map"
  rc = Zoltan_DD_Create(&ddSub, subComm, 1, 1, 0, numLocalCoords, 0);
  if (rc != ZOLTAN_OK)
    exit(1);

  rc = Zoltan_DD_Update(ddSub, newGids, NULL, NULL, NULL, numLocalCoords);
  if (rc != ZOLTAN_OK)
    exit(1);

  ztnBuildN->stop();

  Zoltan_DD_Destroy(&ddSub);

  // Each subgroup migrates the sub-arrays so the
  // global Ids are again increasing with process rank.

  ZOLTAN_GO *increasingGids = NULL;
  subGroupGloballyIncreasingIds<ZOLTAN_GO>(
    numGlobalCoords, nprocs, rank, increasingGids);

  // Global "map" corresponding to new contiguous ids.

  ztnMigrateN->start();
  ztnMigrateN->incrementNumCalls();

  struct Zoltan_DD_Struct *ddNewSub = NULL;
  rc = Zoltan_DD_Create(&ddNewSub, subComm, 1, 1, 0, numLocalCoords, 0);
  if (rc != ZOLTAN_OK)
    exit(1);

  rc = Zoltan_DD_Update(ddNewSub, increasingGids, NULL, NULL, NULL,
    numLocalCoords);

  // Which process gets my current coordinates in the new map?

  rc = Zoltan_DD_Find(ddNewSub, newGids, NULL, NULL, NULL, numLocalCoords, procOwners);
  if (rc != ZOLTAN_OK)
    exit(1);

  delete [] newGids;

  Zoltan_DD_Destroy(&ddNewSub);

  struct Zoltan_Comm_Obj *subCommPlan = NULL;   // global communication plan
  tag = 12000;

  rc = Zoltan_Comm_Create(&subCommPlan, numLocalCoords, procOwners, subComm,
    tag, &numReceive);
  if (rc != ZOLTAN_OK)
    exit(1);

  delete [] procOwners;

  TPETRA_SCALAR *newContigCoords = new TPETRA_SCALAR [COORDDIM * numReceive];

  tag = 13000;
  // To prevent compile warnings or errors
  x = static_cast<void *>(newContigCoords);
  char *charNewContigCoords = static_cast<char *>(x);

  rc = Zoltan_Comm_Do(subCommPlan, tag, charNewCoords,
    sizeof(TPETRA_SCALAR)*COORDDIM, charNewContigCoords);
  if (rc != ZOLTAN_OK)
    exit(1);

  ztnMigrateN->stop();

  delete [] newCoords;
  delete [] newContigCoords;
  delete [] increasingGids;
  Zoltan_Comm_Destroy(&subCommPlan);
}
#else
int main(int narg, char *arg[])
{
  Tpetra::ScopeGuard tscope(&narg, &arg);
  Teuchos::RCP<const Teuchos::Comm<int> > genComm = Tpetra::getDefaultComm();

  if (genComm->getRank() == 0){
    std::cout << "Test not run because MPI is not available." << std::endl;
    std::cout << "PASS" << std::endl;
  }
  return 0;
}
#endif // HAVE_MPI