/* chombo-discharge
 * Copyright © 2021 SINTEF Energy Research.
 * Please refer to Copyright.txt and LICENSE in the chombo-discharge root directory.
 */

/*!
  @file   CD_ParticleContainerImplem.H
  @brief  Implementation of CD_ParticleContainer.H
  @author Robert Marskar
*/
#ifndef CD_ParticleContainerImplem_H
#define CD_ParticleContainerImplem_H

// Std includes
#include <bitset>

// Chombo includes
#include <ParmParse.H>
#include <CH_Timer.H>
#include <MPI_util.H>
#include <ParticleDataI.H>

// Our includes
#include <CD_ParticleContainer.H>
#include <CD_ParallelOps.H>
#include <CD_ParticleOps.H>
#include <CD_Timer.H>
#include <CD_BoxLoops.H>
#include <CD_OpenMP.H>
#include <CD_NamespaceHeader.H>
template <class P>
ParticleContainer<P>::ParticleContainer()
{
  m_isDefined = false;
  m_isOrganizedByCell = false;
  m_profile = false;
  m_debug = false;
  m_verbose = false;
}

template <class P>
ParticleContainer<P>::ParticleContainer(const Vector<DisjointBoxLayout>& a_grids,
                                        const Vector<ProblemDomain>& a_domains,
                                        const Vector<Real>& a_dx,
                                        const Vector<int>& a_refRat,
                                        const Vector<ValidMask>& a_validMask,
                                        const Vector<RefCountedPtr<LevelTiles>>& a_levelTiles,
                                        const RealVect& a_probLo,
                                        const int a_blockingFactor,
                                        const int a_finestLevel,
                                        const std::string a_realm)
{
  CH_TIME("ParticleContainer::ParticleContainer");

  this->define(a_grids,
               a_domains,
               a_dx,
               a_refRat,
               a_validMask,
               a_levelTiles,
               a_probLo,
               a_blockingFactor,
               a_finestLevel,
               a_realm);
}

template <class P>
ParticleContainer<P>::~ParticleContainer()
{
  CH_TIME("ParticleContainer::~ParticleContainer");
}

template <class P>
void
ParticleContainer<P>::define(const Vector<DisjointBoxLayout>& a_grids,
                             const Vector<ProblemDomain>& a_domains,
                             const Vector<Real>& a_dx,
                             const Vector<int>& a_refRat,
                             const Vector<ValidMask>& a_validMask,
                             const Vector<RefCountedPtr<LevelTiles>>& a_levelTiles,
                             const RealVect& a_probLo,
                             const int a_blockingFactor,
                             const int a_finestLevel,
                             const std::string a_realm)
{
  CH_TIME("ParticleContainer::define");

  m_grids = a_grids;
  m_domains = a_domains;
  m_refRat = a_refRat;
  m_validRegion = a_validMask;
  m_probLo = a_probLo;
  m_finestLevel = a_finestLevel;
  m_realm = a_realm;
  m_blockingFactor = a_blockingFactor;
  m_levelTiles = a_levelTiles;

  m_dx.resize(1 + m_finestLevel);
  for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
    m_dx[lvl] = a_dx[lvl] * RealVect::Unit;
  }

  constexpr int base = 0;

  // Set up the grown grids and the particle data holders.
  this->setupGrownGrids(base, m_finestLevel);
  this->setupParticleData(base, m_finestLevel);

  m_isDefined = true;
  m_isOrganizedByCell = false;
  m_profile = false;

  ParmParse pp("ParticleContainer");
  pp.query("profile", m_profile);
  pp.query("debug", m_debug);
  pp.query("verbose", m_verbose);
}
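
// Usage sketch (hypothetical setup): in chombo-discharge this container is normally
// set up through a mesh/realm manager, but it can be defined directly when the grids,
// domains, resolutions, refinement ratios, valid-cell masks, and level tiles are
// available. All the arguments below are placeholders for objects set up elsewhere:
//
//   ParticleContainer<P> particles;
//   particles.define(grids, domains, dx, refRat, validMask, levelTiles,
//                    probLo, blockingFactor, finestLevel, "primal");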

template <class P>
void
ParticleContainer<P>::setupGrownGrids(const int a_base, const int a_finestLevel)
{
  CH_TIME("ParticleContainer::setupGrownGrids");
  if (m_verbose) {
    pout() << "ParticleContainer::setupGrownGrids" << endl;
  }

  // TLDR: This routine sets up the buffer grids which are used when we need to fetch particles that fall off each level's grid.
  // This is very useful when we want to fetch particles that lie on the coarse side of a refinement boundary.

  m_grownGrids.resize(1 + a_finestLevel);

  for (int lvl = 0; lvl <= a_finestLevel; lvl++) {
    const DisjointBoxLayout& dbl = m_grids[lvl];
    const ProblemDomain& domain = m_domains[lvl];

    // Grow boxes by the refinement factor on the finer levels.
    Vector<Box> boxes = dbl.boxArray();
    if (lvl > 0) {
      for (auto& box : boxes.stdVector()) {
        box.grow(m_refRat[lvl - 1]);
        box &= domain;
      }
    }

    m_grownGrids[lvl] = BoxLayout(boxes, dbl.procIDs());
  }
}

template <class P>
void
ParticleContainer<P>::setupParticleData(const int a_base, const int a_finestLevel)
{
  CH_TIME("ParticleContainer::setupParticleData");
  if (m_verbose) {
    pout() << "ParticleContainer::setupParticleData" << endl;
  }

  // TLDR: This sets up the most commonly used particle data holders for this particle AMR container. We allocate:
  //
  // 1. The "normal" particle container data holder m_particles. This is defined on the DisjointBoxLayout,
  //    which is the natural place for the particles to live.
  //
  // 2. A buffer particle data holder defined on a grown DisjointBoxLayout. This is useful when particles
  //    on the coarse level need to deposit to the fine level.
  //
  // 3. A data holder for "masked particles", providing an opportunity to copy/transfer some of the particles on a grid
  //    level to a separate data holder if they lie within a "mask". Typically used for extracting coarse-level particles
  //    that live just outside the fine grid (i.e., on the coarse side of the refinement boundary).
  //
  // 4. A data holder for storing cell-sorted particles. Very useful when particles need to be sorted by cell rather than by patch.

  m_particles.resize(1 + a_finestLevel);
  m_bufferParticles.resize(1 + a_finestLevel);
  m_maskParticles.resize(1 + a_finestLevel);
  m_cellSortedParticles.resize(1 + a_finestLevel);

  for (int lvl = a_base; lvl <= a_finestLevel; lvl++) {
    m_particles[lvl] = RefCountedPtr<ParticleData<P>>(
      new ParticleData<P>(m_grids[lvl], m_domains[lvl], m_blockingFactor, m_dx[lvl], m_probLo));

    m_bufferParticles[lvl] = RefCountedPtr<ParticleData<P>>(
      new ParticleData<P>(m_grownGrids[lvl], m_domains[lvl], m_blockingFactor, m_dx[lvl], m_probLo));

    m_maskParticles[lvl] = RefCountedPtr<ParticleData<P>>(
      new ParticleData<P>(m_grids[lvl], m_domains[lvl], m_blockingFactor, m_dx[lvl], m_probLo));

    m_cellSortedParticles[lvl] = RefCountedPtr<LayoutData<BinFab<P>>>(new LayoutData<BinFab<P>>(m_grids[lvl]));
  }
}

template <class P>
void
ParticleContainer<P>::sortParticles() noexcept
{
  CH_TIME("ParticleContainer::sortParticles");
  if (m_verbose) {
    pout() << "ParticleContainer::sortParticles" << endl;
  }

  for (int lvl = 0; lvl <= m_finestLevel; lvl++) {

    const DisjointBoxLayout& dbl = m_grids[lvl];
    const DataIterator& dit = dbl.dataIterator();

    const int nbox = dit.size();

#pragma omp parallel for schedule(runtime)
    for (int mybox = 0; mybox < nbox; mybox++) {
      const DataIndex& din = dit[mybox];

      List<P>& particles = (*m_particles[lvl])[din].listItems();

      particles.sort();
    }
  }
}
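
// Note: List<P>::sort() relies on the particle type's comparison operator, so this
// routine is only meaningful when P defines operator<. A minimal sketch for a
// hypothetical particle type (all names below are illustrative only):
//
//   struct MyParticle
//   {
//     Real m_weight;
//
//     bool operator<(const MyParticle& a_other) const
//     {
//       return m_weight < a_other.m_weight;
//     }
//   };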

template <class P>
bool
ParticleContainer<P>::isOrganizedByCell() const
{
  return m_isOrganizedByCell;
}

template <class P>
int
ParticleContainer<P>::getFinestLevel() const
{
  CH_assert(m_isDefined);

  return m_finestLevel;
}

template <class P>
const std::string
ParticleContainer<P>::getRealm() const
{
  CH_assert(m_isDefined);

  return m_realm;
}

template <class P>
const Vector<DisjointBoxLayout>&
ParticleContainer<P>::getGrids() const
{
  return m_grids;
}

template <class P>
const RealVect
ParticleContainer<P>::getProbLo() const noexcept
{
  return m_probLo;
}

template <class P>
const Vector<RealVect>
ParticleContainer<P>::getDx() const noexcept
{
  return m_dx;
}

template <class P>
AMRParticles<P>&
ParticleContainer<P>::getParticles()
{
  CH_assert(m_isDefined);

  if (m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::getParticles - particles are sorted by cell!");
  }

  return m_particles;
}

template <class P>
const AMRParticles<P>&
ParticleContainer<P>::getParticles() const
{
  CH_assert(m_isDefined);

  if (m_isOrganizedByCell) {
    MayDay::Abort("ParticleContainer::getParticles - particles are sorted by cell!");
  }

  return m_particles;
}

template <class P>
AMRParticles<P>&
ParticleContainer<P>::getBufferParticles()
{
  CH_assert(m_isDefined);

  return m_bufferParticles;
}

template <class P>
const AMRParticles<P>&
ParticleContainer<P>::getBufferParticles() const
{
  CH_assert(m_isDefined);

  return m_bufferParticles;
}

template <class P>
AMRParticles<P>&
ParticleContainer<P>::getMaskParticles()
{
  CH_assert(m_isDefined);

  return m_maskParticles;
}

template <class P>
const AMRParticles<P>&
ParticleContainer<P>::getMaskParticles() const
{
  CH_assert(m_isDefined);

  return m_maskParticles;
}

template <class P>
ParticleData<P>&
ParticleContainer<P>::operator[](const int a_level)
{
  CH_assert(m_isDefined);

  if (m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::operator[](a_lvl) - particles are sorted by cell!");
  }

  return *m_particles[a_level];
}

template <class P>
const ParticleData<P>&
ParticleContainer<P>::operator[](const int a_level) const
{
  CH_assert(m_isDefined);

  if (m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::operator[](a_lvl) - particles are sorted by cell!");
  }

  return *m_particles[a_level];
}

template <class P>
AMRCellParticles<P>&
ParticleContainer<P>::getCellParticles()
{
  CH_assert(m_isDefined);

  if (!m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::getCellParticles() - particles are not sorted by cell!");
  }

  return m_cellSortedParticles;
}

template <class P>
const AMRCellParticles<P>&
ParticleContainer<P>::getCellParticles() const
{
  CH_assert(m_isDefined);

  if (!m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::getCellParticles() - particles are not sorted by cell!");
  }

  return m_cellSortedParticles;
}

template <class P>
LayoutData<BinFab<P>>&
ParticleContainer<P>::getCellParticles(const int a_level)
{
  CH_assert(m_isDefined);

  if (!m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::getCellParticles(level) - particles are not sorted by cell!");
  }

  return *m_cellSortedParticles[a_level];
}

template <class P>
const LayoutData<BinFab<P>>&
ParticleContainer<P>::getCellParticles(const int a_level) const
{
  CH_TIME("ParticleContainer::getCellParticles(int)");

  CH_assert(m_isDefined);

  if (!m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::getCellParticles(level) - particles are not sorted by cell!");
  }

  return *m_cellSortedParticles[a_level];
}

template <class P>
void
ParticleContainer<P>::getCellParticles(BinFab<P>& cellParticles, const int a_lvl, const DataIndex a_dit) const
{
  CH_TIME("ParticleContainer::getCellParticles(BinFab)");

  CH_assert(m_isDefined);

  if (!m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::getCellParticles - particles are not sorted by cell!");
  }

  cellParticles.define(m_grids[a_lvl][a_dit], m_dx[a_lvl], m_probLo);
  cellParticles.addItems((*m_particles[a_lvl])[a_dit].listItems());
}

template <class P>
void
ParticleContainer<P>::getCellParticlesDestructive(BinFab<P>& cellParticles, const int a_lvl, const DataIndex a_dit)
{
  CH_TIME("ParticleContainer::getCellParticlesDestructive");

  CH_assert(m_isDefined);

  if (!m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::getCellParticlesDestructive - particles are not sorted by cell!");
  }

  cellParticles.define(m_grids[a_lvl].get(a_dit), m_dx[a_lvl], m_probLo);
  cellParticles.addItemsDestructive((*m_particles[a_lvl])[a_dit].listItems());
}

template <class P>
BinFab<P>&
ParticleContainer<P>::getCellParticles(const int a_level, const DataIndex a_dit)
{
  CH_TIME("ParticleContainer::getCellParticles(int, DataIndex)");

  CH_assert(m_isDefined);

  if (!m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::getCellParticles(int, dit) - particles are not sorted by cell!");
  }

  return (*m_cellSortedParticles[a_level])[a_dit];
}

template <class P>
const BinFab<P>&
ParticleContainer<P>::getCellParticles(const int a_level, const DataIndex a_dit) const
{
  CH_TIME("ParticleContainer::getCellParticles(int, DataIndex)");

  CH_assert(m_isDefined);

  if (!m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::getCellParticles(int, dit) - particles are not sorted by cell!");
  }

  return (*m_cellSortedParticles[a_level])[a_dit];
}

template <class P>
void
ParticleContainer<P>::organizeParticlesByCell()
{
  CH_TIME("ParticleContainer::organizeParticlesByCell");
  if (m_verbose) {
    pout() << "ParticleContainer::organizeParticlesByCell" << endl;
  }

  CH_assert(m_isDefined);

  if (!m_isOrganizedByCell) {

    for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
      const DisjointBoxLayout& dbl = m_grids[lvl];
      const DataIterator& dit = dbl.dataIterator();

      const int nbox = dit.size();

#pragma omp parallel for schedule(runtime)
      for (int mybox = 0; mybox < nbox; mybox++) {
        const DataIndex& din = dit[mybox];

        BinFab<P>& cellParticles = (*m_cellSortedParticles[lvl])[din];

        cellParticles.define(dbl[din], m_dx[lvl], m_probLo);
        cellParticles.addItemsDestructive((*m_particles[lvl])[din].listItems());
      }
    }

    m_isOrganizedByCell = true;
  }
}

template <class P>
void
ParticleContainer<P>::organizeParticlesByPatch()
{
  CH_TIME("ParticleContainer::organizeParticlesByPatch");
  if (m_verbose) {
    pout() << "ParticleContainer::organizeParticlesByPatch" << endl;
  }

  CH_assert(m_isDefined);

  if (m_isOrganizedByCell) {

    constexpr int comp = 0;

    for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
      const DisjointBoxLayout& dbl = m_grids[lvl];
      const DataIterator& dit = dbl.dataIterator();

      const int nbox = dit.size();

#pragma omp parallel for schedule(runtime)
      for (int mybox = 0; mybox < nbox; mybox++) {
        const DataIndex& din = dit[mybox];

        ListBox<P>& patchParticles = (*m_particles[lvl])[din];
        BinFab<P>& cellParticles = (*m_cellSortedParticles[lvl])[din];

        // Kernel which moves particles from the cell container to the patch container.
        auto kernel = [&](const IntVect& iv) -> void {
          patchParticles.addItemsDestructive(cellParticles(iv, comp));
        };

        BoxLoops::loop(dbl[din], kernel);

        cellParticles.clear();
      }
    }

    m_isOrganizedByCell = false;
  }
}
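
// Round-trip sketch: the cell-sorted view is transient. A typical pattern, e.g.
// for cell-wise particle handling, is:
//
//   particles.organizeParticlesByCell();
//
//   // ... operate on the per-cell lists obtained through getCellParticles(...) ...
//
//   particles.organizeParticlesByPatch();
//
// Most other routines (remap, addParticles, etc.) require patch-sorted particles
// and will abort with an error if the container is left in the cell-sorted state.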

template <class P>
void
ParticleContainer<P>::addParticles(const List<P>& a_particles)
{
  CH_TIME("ParticleContainer::addParticles");
  if (m_verbose) {
    pout() << "ParticleContainer::addParticles" << endl;
  }

  CH_assert(m_isDefined);

  if (m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::addParticles(List<P>) - particles are sorted by cell!");
  }

  // TLDR: This routine mirrors the remap routine; the input particles are joined (copied) onto the collected outcasts,
  // and everything is then remapped onto the AMR hierarchy.

  const unsigned int numRanks = numProc();
  const unsigned int myRank = procID();

  using LevelAndIndex = std::pair<unsigned int, unsigned int>;

  std::vector<std::map<LevelAndIndex, List<P>>> particlesToSend(numRanks);

#pragma omp parallel
  {
    List<P> outcasts;

#pragma omp master
    {
      outcasts.join(a_particles);
    }

    std::vector<std::map<LevelAndIndex, List<P>>> threadLocalParticlesToSend(numRanks);

    // Collect the particles owned by this rank/thread.
    this->transferParticlesToSingleList(outcasts, m_particles);

    // Map the particles to the grid patches where they belong.
    this->mapParticlesToAMRGrid(threadLocalParticlesToSend, outcasts);

    // Catenate the thread-local particle lists onto particlesToSend.
#pragma omp critical
    this->catenateParticleMaps(particlesToSend, threadLocalParticlesToSend);
  }

  // Particles that go from this rank to this rank don't need to be communicated; place them directly on the correct patch.
  this->assignLocalParticles(particlesToSend[myRank], m_particles);

  // If using MPI, scatter the remaining particles across the MPI ranks.
#ifdef CH_MPI
  std::map<LevelAndIndex, List<P>> receivedParticles;

  ParticleOps::scatterParticles(receivedParticles, particlesToSend);

  // Assign the received particles to the correct level and grid patch -- we iterate through receivedParticles and decode
  // the level/index information stored there.
  this->assignLocalParticles(receivedParticles, m_particles);
#endif

  if (m_debug) {
    this->sanityCheck();
  }
}
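
// Usage sketch (hypothetical particle type): the input list may contain particles
// from anywhere in the domain; they are remapped onto the owning level/patch/rank
// as part of the call:
//
//   List<P> newParticles;
//   // ... fill newParticles ...
//
//   particles.addParticles(newParticles);              // copies the input list
//   particles.addParticlesDestructive(newParticles);   // empties the input list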

template <class P>
void
ParticleContainer<P>::addParticlesDestructive(List<P>& a_particles)
{
  CH_TIME("ParticleContainer::addParticlesDestructive");
  if (m_verbose) {
    pout() << "ParticleContainer::addParticlesDestructive" << endl;
  }

  CH_assert(m_isDefined);

  if (m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::addParticlesDestructive(List<P>) - particles are sorted by cell!");
  }

  // TLDR: This routine mirrors the remap routine; the input particles are catenated onto the collected outcasts,
  // and everything is then remapped onto the AMR hierarchy. The input list is emptied in the process.

  const unsigned int numRanks = numProc();
  const unsigned int myRank = procID();

  using LevelAndIndex = std::pair<unsigned int, unsigned int>;

  std::vector<std::map<LevelAndIndex, List<P>>> particlesToSend(numRanks);

#pragma omp parallel
  {
    List<P> outcasts;

#pragma omp master
    {
      outcasts.catenate(a_particles);
    }

    std::vector<std::map<LevelAndIndex, List<P>>> threadLocalParticlesToSend(numRanks);

    // Collect the particles owned by this rank/thread.
    this->transferParticlesToSingleList(outcasts, m_particles);

    // Map the particles to the grid patches where they belong.
    this->mapParticlesToAMRGrid(threadLocalParticlesToSend, outcasts);

    // Catenate the thread-local particle lists onto particlesToSend.
#pragma omp critical
    this->catenateParticleMaps(particlesToSend, threadLocalParticlesToSend);
  }

  // Particles that go from this rank to this rank don't need to be communicated; place them directly on the correct patch.
  this->assignLocalParticles(particlesToSend[myRank], m_particles);

  // If using MPI, scatter the remaining particles across the MPI ranks.
#ifdef CH_MPI
  std::map<LevelAndIndex, List<P>> receivedParticles;

  ParticleOps::scatterParticles(receivedParticles, particlesToSend);

  // Assign the received particles to the correct level and grid patch.
  this->assignLocalParticles(receivedParticles, m_particles);
#endif

  if (m_debug) {
    this->sanityCheck();
  }
}

template <class P>
void
ParticleContainer<P>::addParticles(const BinFab<P>& a_particles, const int a_lvl, const DataIndex a_dit)
{
  CH_TIME("ParticleContainer::addParticles(BinFab)");
  if (m_verbose) {
    pout() << "ParticleContainer::addParticles(BinFab)" << endl;
  }

  CH_assert(m_isDefined);
  CH_assert(m_grids[a_lvl].get(a_dit) == a_particles.getRegion());

  if (!m_isOrganizedByCell) {
    MayDay::Abort("ParticleContainer::addParticles(BinFab<P>) - particles are not sorted by cell!");
  }

  constexpr int comp = 0;

  BinFab<P>& boxParticles = (*m_cellSortedParticles[a_lvl])[a_dit];

  auto kernel = [&](const IntVect& iv) -> void {
    List<P>& myParticles = boxParticles(iv, comp);
    const List<P>& inputParticles = a_particles(iv, comp);

    myParticles.join(inputParticles);
  };

  BoxLoops::loop(m_grids[a_lvl][a_dit], kernel);
}

template <class P>
void
ParticleContainer<P>::addParticlesDestructive(BinFab<P>& a_particles, const int a_lvl, const DataIndex a_dit)
{
  CH_TIME("ParticleContainer::addParticlesDestructive(BinFab)");
  if (m_verbose) {
    pout() << "ParticleContainer::addParticlesDestructive(BinFab)" << endl;
  }

  CH_assert(m_isDefined);
  CH_assert(m_grids[a_lvl].get(a_dit) == a_particles.getRegion());

  if (!m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::addParticlesDestructive(BinFab<P>) - particles are not sorted by cell!");
  }

  constexpr int comp = 0;

  BinFab<P>& boxParticles = (*m_cellSortedParticles[a_lvl])[a_dit];

  auto kernel = [&](const IntVect& iv) -> void {
    List<P>& myParticles = boxParticles(iv, comp);
    const List<P>& inputParticles = a_particles(iv, comp);

    myParticles.catenate(inputParticles);
  };

  BoxLoops::loop(m_grids[a_lvl][a_dit], kernel);
}

template <class P>
void
ParticleContainer<P>::addParticles(const ParticleContainer<P>& a_otherContainer)
{
  CH_TIME("ParticleContainer::addParticles(ParticleContainer)");
  if (m_verbose) {
    pout() << "ParticleContainer::addParticles(ParticleContainer)" << endl;
  }

  CH_assert(m_isDefined);

  if (m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::addParticles(ParticleContainer<P>) - particles are sorted by cell!");
  }

  // TLDR: This routine adds particles from a different container to this container. If the containers live on the same realm,
  // we just add the particles directly. Otherwise, we have to add to the outcast list and remap.

  if (m_realm == a_otherContainer.getRealm()) {
    for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
      const DisjointBoxLayout& dbl = m_grids[lvl];
      const DataIterator& dit = dbl.dataIterator();

      const int nbox = dit.size();

#pragma omp parallel for schedule(runtime)
      for (int mybox = 0; mybox < nbox; mybox++) {
        const DataIndex& din = dit[mybox];

        ListBox<P>& myParticles = (*m_particles[lvl])[din];
        const ListBox<P>& otherParticles = a_otherContainer[lvl][din];

        myParticles.addItems(otherParticles.listItems());
      }
    }
  }
  else {
    // If we're adding particles across realms we need to collect the input particles onto lists that get remapped
    // into m_particles. A full explanation of how this works is given in the remap routine.

    const unsigned int numRanks = numProc();
    const unsigned int myRank = procID();

    using LevelAndIndex = std::pair<unsigned int, unsigned int>;

    std::vector<std::map<LevelAndIndex, List<P>>> particlesToSend(numRanks);

#pragma omp parallel
    {
      List<P> outcasts;

      std::vector<std::map<LevelAndIndex, List<P>>> threadLocalParticlesToSend(numRanks);

      // Copy the particles owned by this rank/thread, but on the other realm.
      a_otherContainer.copyParticlesToSingleList(outcasts, a_otherContainer.getParticles());

      // Map the particles to the grid patches where they belong.
      this->mapParticlesToAMRGrid(threadLocalParticlesToSend, outcasts);

      // Catenate the thread-local particle lists onto particlesToSend.
#pragma omp critical
      this->catenateParticleMaps(particlesToSend, threadLocalParticlesToSend);
    }

    // Particles that go from this rank to this rank don't need to be communicated; place them directly on the correct patch.
    this->assignLocalParticles(particlesToSend[myRank], m_particles);

    // If using MPI, scatter the remaining particles across the MPI ranks.
#ifdef CH_MPI
    std::map<LevelAndIndex, List<P>> receivedParticles;

    ParticleOps::scatterParticles(receivedParticles, particlesToSend);

    // Assign the received particles to the correct level and grid patch.
    this->assignLocalParticles(receivedParticles, m_particles);
#endif
  }

  if (m_debug) {
    this->sanityCheck();
  }
}

template <class P>
void
ParticleContainer<P>::addParticlesDestructive(ParticleContainer<P>& a_otherContainer)
{
  CH_TIME("ParticleContainer::addParticlesDestructive(ParticleContainer)");
  if (m_verbose) {
    pout() << "ParticleContainer::addParticlesDestructive(ParticleContainer)" << endl;
  }

  CH_assert(m_isDefined);

  if (m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::addParticlesDestructive(ParticleContainer<P>) - particles are sorted by cell!");
  }

  // TLDR: This routine moves particles from a different container into this container. If the containers live on the same
  // realm, we transfer the particles directly. Otherwise, we have to add to the outcast list and remap.

  if (m_realm == a_otherContainer.getRealm()) {
    for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
      const DisjointBoxLayout& dbl = m_grids[lvl];
      const DataIterator& dit = dbl.dataIterator();

      const int nbox = dit.size();

#pragma omp parallel for schedule(runtime)
      for (int mybox = 0; mybox < nbox; mybox++) {
        const DataIndex& din = dit[mybox];

        // Transfer (rather than copy) the particles; this is the destructive version, so the input container is emptied.
        List<P>& myParticles = (*m_particles[lvl])[din].listItems();
        List<P>& otherParticles = a_otherContainer[lvl][din].listItems();

        myParticles.catenate(otherParticles);
      }
    }
  }
  else {
    // If we're adding particles across realms we need to collect the input particles onto lists that get remapped
    // into m_particles. A full explanation of how this works is given in the remap routine.

    const unsigned int numRanks = numProc();
    const unsigned int myRank = procID();

    using LevelAndIndex = std::pair<unsigned int, unsigned int>;

    std::vector<std::map<LevelAndIndex, List<P>>> particlesToSend(numRanks);

#pragma omp parallel
    {
      List<P> outcasts;

      std::vector<std::map<LevelAndIndex, List<P>>> threadLocalParticlesToSend(numRanks);

      // Collect the particles owned by this rank/thread, but on the other realm.
      a_otherContainer.transferParticlesToSingleList(outcasts, a_otherContainer.getParticles());

      // Map the particles to the grid patches where they belong.
      this->mapParticlesToAMRGrid(threadLocalParticlesToSend, outcasts);

      // Catenate the thread-local particle lists onto particlesToSend.
#pragma omp critical
      this->catenateParticleMaps(particlesToSend, threadLocalParticlesToSend);
    }

    // Particles that go from this rank to this rank don't need to be communicated; place them directly on the correct patch.
    this->assignLocalParticles(particlesToSend[myRank], m_particles);

    // If using MPI, scatter the remaining particles across the MPI ranks.
#ifdef CH_MPI
    std::map<LevelAndIndex, List<P>> receivedParticles;

    ParticleOps::scatterParticles(receivedParticles, particlesToSend);

    // Assign the received particles to the correct level and grid patch.
    this->assignLocalParticles(receivedParticles, m_particles);
#endif
  }

  if (m_debug) {
    this->sanityCheck();
  }
}

template <class P>
void
ParticleContainer<P>::transferParticles(ParticleContainer<P>& a_otherContainer)
{
  CH_TIME("ParticleContainer::transferParticles");

  CH_assert(m_isDefined);
  CH_assert(this->getRealm() == a_otherContainer.getRealm());

  if (m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::transferParticles(ParticleContainer<P>) - particles are sorted by cell!");
  }

  if (a_otherContainer.isOrganizedByCell()) {
    MayDay::Error("ParticleContainer::transferParticles(ParticleContainer<P>) - other particles are sorted by cell!");
  }

  if (a_otherContainer.getRealm() != m_realm) {
    MayDay::Error(
      "ParticleContainer::transferParticles(ParticleContainer<P>) - other container defined over a different realm");
  }

  this->transferParticles(a_otherContainer.getParticles());
}

template <class P>
void
ParticleContainer<P>::transferParticles(AMRParticles<P>& a_particles)
{
  CH_TIME("ParticleContainer::transferParticles(AMRParticles)");
  if (m_verbose) {
    pout() << "ParticleContainer::transferParticles(AMRParticles)" << endl;
  }

  CH_assert(m_isDefined);

  if (m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::transferParticles(AMRParticles<P>) - particles are sorted by cell!");
  }

  // OK, transfer the particles.
  for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
    const DisjointBoxLayout& dbl = m_grids[lvl];
    const DataIterator& dit = dbl.dataIterator();

    const int nbox = dit.size();

#pragma omp parallel for schedule(runtime)
    for (int mybox = 0; mybox < nbox; mybox++) {
      const DataIndex& din = dit[mybox];

      List<P>& myParticles = (*m_particles[lvl])[din].listItems();
      List<P>& otherParticles = (*a_particles[lvl])[din].listItems();

      myParticles.catenate(otherParticles);
    }
  }
}
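
// Transfer sketch (hypothetical containers): both containers must be patch-sorted
// and defined over the same realm; the donor container is left empty:
//
//   target.transferParticles(source); // source holds no particles afterwards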

template <class P>
void
ParticleContainer<P>::remap()
{
  CH_TIME("ParticleContainer<P>::remap");
  if (m_verbose) {
    pout() << "ParticleContainer::remap" << endl;
  }

  // TLDR: This routine is quite long, but it performs the full remapping over the whole AMR hierarchy. It discards
  // particles that fall off the grid.
  //
  // This is done in the following steps:
  // 1) Collect all particles from this rank onto thread-local lists.
  // 2) Iterate through those particles and figure out where they end up in the AMR hierarchy (level, grid index, and ownership, i.e., the MPI rank).
  // 3) Collect those particles onto a rank-global (not thread-local) variable.
  // 4) Assign particles _locally_, i.e., place particles sent from this rank to this rank directly onto m_particles.
  // 5) If using MPI, scatter the remaining particles to the appropriate ranks.
  // 6) Assign the particles that were scattered to this rank locally.

  const unsigned int numRanks = numProc();
  const unsigned int myRank = procID();

  using LevelAndIndex = std::pair<unsigned int, unsigned int>;

  // These are the particles that get sent to each MPI rank. The pair contains the grid level and grid index.
  // Each MPI rank (and OpenMP thread) goes through its particles and checks if they've fallen off the grid. If they have,
  // the particles are moved onto the appropriate lists that will get sent to each rank.
  std::vector<std::map<LevelAndIndex, List<P>>> particlesToSend(numRanks);

#pragma omp parallel
  {
    List<P> outcasts;

    std::vector<std::map<LevelAndIndex, List<P>>> threadLocalParticlesToSend(numRanks);

    // Collect the particles owned by this rank/thread.
    this->transferParticlesToSingleList(outcasts, m_particles);

    // Map the particles to the grid patches where they belong.
    this->mapParticlesToAMRGrid(threadLocalParticlesToSend, outcasts);

    // Catenate the thread-local particle lists onto particlesToSend.
#pragma omp critical
    this->catenateParticleMaps(particlesToSend, threadLocalParticlesToSend);
  }

  // Particles that go from this rank to this rank don't need to be communicated; place them directly on the correct patch.
  this->assignLocalParticles(particlesToSend[myRank], m_particles);

  // If using MPI, scatter the remaining particles across the MPI ranks.
#ifdef CH_MPI
  std::map<LevelAndIndex, List<P>> receivedParticles;

  ParticleOps::scatterParticles(receivedParticles, particlesToSend);

  // Assign the received particles to the correct level and grid patch -- we iterate through receivedParticles and decode
  // the level/index information stored there.
  this->assignLocalParticles(receivedParticles, m_particles);
#endif

  if (m_debug) {
    this->sanityCheck();
  }
}
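
// Remap sketch: remap() is what reconciles particle positions with grid ownership
// after the particles have moved. A typical transport step (hypothetical names for
// the velocity v, the time step dt, and the patch list):
//
//   for (ListIterator<P> lit(patchParticles); lit.ok(); ++lit) {
//     lit().position() += v * dt; // particles may leave their patches here
//   }
//
//   particles.remap(); // re-assign particles to the correct level/patch/rank
//
// Particles that end up outside the domain are discarded by the remap.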

template <typename P>
void
ParticleContainer<P>::sanityCheck() const noexcept
{
  CH_TIME("ParticleContainer::sanityCheck");

  for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
    const DisjointBoxLayout& dbl = m_grids[lvl];
    const RealVect dx = m_dx[lvl];

    for (DataIterator dit(dbl); dit.ok(); ++dit) {
      const List<P>& particles = (*m_particles[lvl])[dit()].listItems();
      const Box box = dbl[dit()];

      for (ListIterator<P> lit(particles); lit.ok(); ++lit) {
        const RealVect particlePosition = lit().position();
        const IntVect particleCell = ParticleOps::getParticleCellIndex(particlePosition, m_probLo, dx);

        if (!(box.contains(particleCell))) {
          MayDay::Error("ParticleContainer::sanityCheck - particle is not contained in its grid box!");
        }
      }
    }
  }
}

template <typename P>
inline void
ParticleContainer<P>::transferParticlesToSingleList(List<P>& a_list, AMRParticles<P>& a_particles) const noexcept
{
  CH_TIME("ParticleContainer::transferParticlesToSingleList");

  // Note: this routine is meant to be called from inside an OpenMP parallel region;
  // the loop below then distributes the grid boxes across the thread team.
  for (int lvl = 0; lvl < a_particles.size(); lvl++) {
    const BoxLayout& dbl = a_particles[lvl]->getBoxes();
    const DataIterator& dit = dbl.dataIterator();

    const int nbox = dit.size();

#pragma omp for schedule(runtime)
    for (int mybox = 0; mybox < nbox; mybox++) {
      const DataIndex& din = dit[mybox];

      List<P>& particles = (*a_particles[lvl])[din].listItems();

      a_list.catenate(particles);
    }
  }
}

template <typename P>
inline void
ParticleContainer<P>::copyParticlesToSingleList(List<P>& a_list, const AMRParticles<P>& a_particles) const noexcept
{
  CH_TIME("ParticleContainer::copyParticlesToSingleList");

  for (int lvl = 0; lvl < a_particles.size(); lvl++) {
    const BoxLayout& dbl = a_particles[lvl]->getBoxes();
    const DataIterator& dit = dbl.dataIterator();

    const int nbox = dit.size();

#pragma omp for schedule(runtime)
    for (int mybox = 0; mybox < nbox; mybox++) {
      const DataIndex& din = dit[mybox];

      const List<P>& particles = (*a_particles[lvl])[din].listItems();

      a_list.join(particles);
    }
  }
}

template <typename P>
inline void
ParticleContainer<P>::mapParticlesToAMRGrid(
  std::vector<std::map<std::pair<unsigned int, unsigned int>, List<P>>>& a_mappedParticles,
  List<P>& a_unmappedParticles) const noexcept
{
  CH_TIME("ParticleContainer::mapParticlesToAMRGrid");

  const unsigned int numRanks = numProc();
  const unsigned int myRank = procID();

  std::vector<RealVect> quasiDx(1 + m_finestLevel);

  for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
    quasiDx[lvl] = m_blockingFactor * m_dx[lvl];
  }

  // Starting on the finest level, figure out where the particles wound up. We go from finest to coarsest here because
  // we want each particle to end up on the finest grid level that contains it.
  for (ListIterator<P> lit(a_unmappedParticles); lit.ok();) {
    bool foundTile = false;

    for (int lvl = m_finestLevel; lvl >= 0 && !foundTile; lvl--) {
      const IntVect particleTile = locateBin(lit().position(), quasiDx[lvl], m_probLo);

      const auto& myLevelTiles = m_levelTiles[lvl]->getMyTiles();
      const auto& otherLevelTiles = m_levelTiles[lvl]->getOtherTiles();

      if (myLevelTiles.find(particleTile) != myLevelTiles.end()) {
        // Found the particle on this level, and this rank is the one that owns it.
        const unsigned int gridIndex = myLevelTiles.at(particleTile);
        const unsigned int toRank = myRank;

        a_mappedParticles[toRank][std::pair<unsigned int, unsigned int>(lvl, gridIndex)].transfer(lit);

        foundTile = true;
      }
#ifdef CH_MPI
      else if (otherLevelTiles.find(particleTile) != otherLevelTiles.end()) {
        // The particle was found on this level, but not on this rank.
        const unsigned int gridIndex = otherLevelTiles.at(particleTile).first;
        const unsigned int toRank = otherLevelTiles.at(particleTile).second;

        a_mappedParticles[toRank][std::pair<unsigned int, unsigned int>(lvl, gridIndex)].transfer(lit);

        foundTile = true;
      }
#endif
    }

    // If the particle was not found on any level it fell off the domain; step past it (it is discarded below).
    if (!foundTile) {
      ++lit;
    }
  }

  // The remaining (unmapped) particles fell off the domain and are discarded.
  a_unmappedParticles.clear();
}

template <typename P>
inline void
ParticleContainer<P>::catenateParticleMaps(
  std::vector<std::map<std::pair<unsigned int, unsigned int>, List<P>>>& a_globalParticles,
  std::vector<std::map<std::pair<unsigned int, unsigned int>, List<P>>>& a_localParticles) const noexcept
{
  CH_TIME("ParticleContainer::catenateParticleMaps");

  const unsigned int numRanks = numProc();

  using LevelAndIndex = std::pair<unsigned int, unsigned int>;

  for (unsigned int irank = 0; irank < numRanks; irank++) {
    std::map<LevelAndIndex, List<P>>& globalParticles = a_globalParticles[irank];

    for (auto& m : a_localParticles[irank]) {
      globalParticles[m.first].catenate(m.second);
    }
  }
}

template <typename P>
inline void
ParticleContainer<P>::assignLocalParticles(std::map<std::pair<unsigned int, unsigned int>, List<P>>& a_mappedParticles,
                                           AMRParticles<P>& a_particleData) const noexcept
{
  CH_TIME("ParticleContainer::assignLocalParticles");

#pragma omp parallel
  {
#pragma omp single
    {
      for (auto mapIter = a_mappedParticles.begin(); mapIter != a_mappedParticles.end(); mapIter++) {
#pragma omp task firstprivate(mapIter)
        {
          const unsigned int gridLevel = mapIter->first.first;
          const unsigned int gridIndex = mapIter->first.second;
          const DataIndex din = m_levelTiles[gridLevel]->getMyGrids().at(gridIndex);

          List<P>& particles = mapIter->second;
          List<P>& myParticles = (*a_particleData[gridLevel])[din].listItems();

          myParticles.catenate(particles);
        }
      }
    }
  }

  a_mappedParticles.clear();
}

template <class P>
void
ParticleContainer<P>::preRegrid(const int a_base)
{
  CH_TIME("ParticleContainer::preRegrid");
  if (m_verbose) {
    pout() << "ParticleContainer::preRegrid" << endl;
  }

  // TLDR: This routine takes all the particles on each level and puts them in a single list (per processor). After that
  // we can safely destruct the ParticleData on each level without losing the particles.

  CH_assert(m_isDefined);
  if (m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::preRegrid - particles are sorted by cell!");
  }

  // Fill the cache particles on each level.
  m_cacheParticles.resize(1 + m_finestLevel);

  for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
    m_cacheParticles[lvl] = RefCountedPtr<ParticleData<P>>(
      new ParticleData<P>(m_grids[lvl], m_domains[lvl], m_blockingFactor, m_dx[lvl], m_probLo));

    const DisjointBoxLayout& dbl = m_grids[lvl];
    const DataIterator& dit = dbl.dataIterator();

    const int nbox = dit.size();

#pragma omp parallel for schedule(runtime)
    for (int mybox = 0; mybox < nbox; mybox++) {
      const DataIndex& din = dit[mybox];

      List<P>& cacheParticles = (*m_cacheParticles[lvl])[din].listItems();
      List<P>& solverParticles = (*m_particles[lvl])[din].listItems();

      cacheParticles.catenate(solverParticles);
    }
  }
}

template <class P>
void
ParticleContainer<P>::regrid(const Vector<DisjointBoxLayout>& a_grids,
                             const Vector<ProblemDomain>& a_domains,
                             const Vector<Real>& a_dx,
                             const Vector<int>& a_refRat,
                             const Vector<ValidMask>& a_validMask,
                             const Vector<RefCountedPtr<LevelTiles>>& a_levelTiles,
                             const int a_lmin,
                             const int a_newFinestLevel)
{
  CH_TIME("ParticleContainer::regrid");
  if (m_verbose) {
    pout() << "ParticleContainer::regrid" << endl;
  }

  // TLDR: This is the main regrid routine for the particle container. It is essentially a ::define() followed by a remap
  // of the particles that were cached in preRegrid().
  CH_assert(m_isDefined);

  if (m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::regrid(...) - particles are sorted by cell!");
  }

  // Update the grid information.
  m_grids = a_grids;
  m_domains = a_domains;
  m_refRat = a_refRat;
  m_validRegion = a_validMask;
  m_finestLevel = a_newFinestLevel;
  m_levelTiles = a_levelTiles;

  m_dx.resize(1 + m_finestLevel);
  for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
    m_dx[lvl] = a_dx[lvl] * RealVect::Unit;
  }

  this->setupGrownGrids(a_lmin, m_finestLevel);
  this->setupParticleData(a_lmin, m_finestLevel);

  // Perform the remapping operation.
  const unsigned int numRanks = numProc();
  const unsigned int myRank = procID();

  using LevelAndIndex = std::pair<unsigned int, unsigned int>;

  // These are the particles that get sent to each MPI rank. The pair contains the grid level and grid index.
  // Each MPI rank (and OpenMP thread) goes through its cached particles and figures out where they belong on the
  // new grids; the particles are then moved onto the appropriate lists that get sent to each rank.
  std::vector<std::map<LevelAndIndex, List<P>>> particlesToSend(numRanks);

#pragma omp parallel
  {
    List<P> outcasts;

    std::vector<std::map<LevelAndIndex, List<P>>> threadLocalParticlesToSend(numRanks);

    // Collect the particles owned by this rank/thread.
    this->transferParticlesToSingleList(outcasts, m_cacheParticles);

    // Map the particles to the grid patches where they belong.
    this->mapParticlesToAMRGrid(threadLocalParticlesToSend, outcasts);

    // Catenate the thread-local particle lists onto particlesToSend.
#pragma omp critical
    this->catenateParticleMaps(particlesToSend, threadLocalParticlesToSend);
  }

  // Particles that go from this rank to this rank don't need to be communicated; place them directly on the correct patch.
  this->assignLocalParticles(particlesToSend[myRank], m_particles);

  // If using MPI, scatter the remaining particles across the MPI ranks.
#ifdef CH_MPI
  std::map<LevelAndIndex, List<P>> receivedParticles;

  ParticleOps::scatterParticles(receivedParticles, particlesToSend);

  // Assign the received particles to the correct level and grid patch.
  this->assignLocalParticles(receivedParticles, m_particles);
#endif

  if (m_debug) {
    this->sanityCheck();
  }

  // Discard the transient storage.
  m_cacheParticles.resize(0);
}
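
// Regrid sketch: the expected calling sequence during regrids is
//
//   particles.preRegrid(lmin);  // cache the particles before the grids change
//   // ... generate the new grids ...
//   particles.regrid(newGrids, newDomains, newDx, newRefRat,
//                    newValidMask, newLevelTiles, lmin, newFinestLevel);
//
// preRegrid() moves the particles out of m_particles and into a cache, which
// regrid() later remaps onto the new grid hierarchy.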

template <class P>
template <Real& (P::*particleScalarField)()>
void
ParticleContainer<P>::setValue(const Real a_value)
{
  CH_TIME("ParticleContainer::setValue");

  for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
    const DisjointBoxLayout& dbl = m_grids[lvl];
    const DataIterator& dit = dbl.dataIterator();

    const int nbox = dit.size();

#pragma omp parallel for schedule(runtime)
    for (int mybox = 0; mybox < nbox; mybox++) {
      const DataIndex& din = dit[mybox];

      List<P>& patchParticles = (*m_particles[lvl])[din].listItems();

      for (ListIterator<P> lit(patchParticles); lit.ok(); ++lit) {
        P& p = lit();

        (p.*particleScalarField)() = a_value;
      }
    }
  }
}

template <class P>
template <RealVect& (P::*particleVectorField)()>
void
ParticleContainer<P>::setValue(const RealVect a_value)
{
  CH_TIME("ParticleContainer::setValue");

  for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
    const DisjointBoxLayout& dbl = m_grids[lvl];
    const DataIterator& dit = dbl.dataIterator();

    const int nbox = dit.size();

#pragma omp parallel for schedule(runtime)
    for (int mybox = 0; mybox < nbox; mybox++) {
      const DataIndex& din = dit[mybox];

      List<P>& patchParticles = (*m_particles[lvl])[din].listItems();

      for (ListIterator<P> lit(patchParticles); lit.ok(); ++lit) {
        P& p = lit();

        (p.*particleVectorField)() = a_value;
      }
    }
  }
}
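
// Usage sketch: the template parameter is a pointer-to-member function that returns
// a mutable reference. Assuming a particle class with accessors Real& P::weight()
// and RealVect& P::velocity() (hypothetical names):
//
//   particles.setValue<&P::weight>(1.0);
//   particles.setValue<&P::velocity>(RealVect::Zero);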

template <class P>
unsigned long long
ParticleContainer<P>::getNumberOfValidParticlesLocal() const
{
  CH_TIME("ParticleContainer::getNumberOfValidParticlesLocal");

  CH_assert(m_isDefined);

  unsigned long long n = 0;

  for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
    const DisjointBoxLayout& dbl = m_grids[lvl];
    const DataIterator& dit = dbl.dataIterator();

    const int nbox = dit.size();

#pragma omp parallel for schedule(runtime) reduction(+ : n)
    for (int mybox = 0; mybox < nbox; mybox++) {
      const DataIndex& din = dit[mybox];

      const List<P>& particles = (*m_particles[lvl])[din].listItems();

      n += (unsigned long long)particles.length();
    }
  }

  return n;
}

template <class P>
unsigned long long
ParticleContainer<P>::getNumberOfValidParticlesGlobal() const
{
  CH_TIME("ParticleContainer::getNumberOfValidParticlesGlobal");

  CH_assert(m_isDefined);

  const unsigned long long n = this->getNumberOfValidParticlesLocal();

  return ParallelOps::sum(n);
}

template <class P>
unsigned long long
ParticleContainer<P>::getNumberOfOutcastParticlesLocal() const
{
  CH_TIME("ParticleContainer::getNumberOfOutcastParticlesLocal");

  CH_assert(m_isDefined);

  unsigned long long n = 0;

  // The outcast list lives on the ParticleData (i.e., per level rather than per grid box),
  // so it is counted exactly once per level.
  for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
    const List<P>& outcast = m_particles[lvl]->outcast();

    n += (unsigned long long)outcast.length();
  }

  return n;
}

template <class P>
unsigned long long
ParticleContainer<P>::getNumberOfOutcastParticlesGlobal() const
{
  CH_TIME("ParticleContainer::getNumberOfOutcastParticlesGlobal");

  CH_assert(m_isDefined);

  const unsigned long long n = this->getNumberOfOutcastParticlesLocal();

  return ParallelOps::sum(n);
}

template <class P>
unsigned long long
ParticleContainer<P>::getNumberOfMaskParticlesLocal() const
{
  CH_TIME("ParticleContainer::getNumberOfMaskParticlesLocal");

  CH_assert(m_isDefined);

  unsigned long long n = 0;

  for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
    const DisjointBoxLayout& dbl = m_grids[lvl];
    const DataIterator& dit = dbl.dataIterator();

    const int nbox = dit.size();

#pragma omp parallel for schedule(runtime) reduction(+ : n)
    for (int mybox = 0; mybox < nbox; mybox++) {
      const DataIndex& din = dit[mybox];

      const List<P>& maskParticles = (*m_maskParticles[lvl])[din].listItems();

      n += (unsigned long long)maskParticles.length();
    }
  }

  return n;
}

template <class P>
unsigned long long
ParticleContainer<P>::getNumberOfMaskParticlesGlobal() const
{
  CH_TIME("ParticleContainer::getNumberOfMaskParticlesGlobal");

  CH_assert(m_isDefined);

  const unsigned long long n = this->getNumberOfMaskParticlesLocal();

  return ParallelOps::sum(n);
}

template <class P>
void
ParticleContainer<P>::copyMaskParticles(const Vector<RefCountedPtr<LevelData<BaseFab<bool>>>>& a_mask) const
{
  CH_TIME("ParticleContainer::copyMaskParticles(amr)");

  CH_assert(m_isDefined);

  for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
    if (!a_mask[lvl].isNull()) {
      this->copyMaskParticles(lvl, *a_mask[lvl]);
    }
  }
}

template <class P>
void
ParticleContainer<P>::copyMaskParticles(const int a_level, const LevelData<BaseFab<bool>>& a_mask) const
{
  CH_TIME("ParticleContainer::copyMaskParticles(level)");
  if (m_verbose) {
    pout() << "ParticleContainer::copyMaskParticles(level)" << endl;
  }

  CH_assert(m_isDefined);

  m_maskParticles[a_level]->clear();

  const RealVect dx = m_dx[a_level];

  // Copy particles from m_particles to m_maskParticles if they lie within the input mask.
  const DisjointBoxLayout& dbl = m_grids[a_level];
  const DataIterator& dit = dbl.dataIterator();

  const int nbox = dit.size();

#pragma omp parallel for schedule(runtime)
  for (int mybox = 0; mybox < nbox; mybox++) {
    const DataIndex& din = dit[mybox];

    const BaseFab<bool>& mask = a_mask[din];
    const Box gridBox = m_grids[a_level][din];
    const Box maskBox = mask.box();

    CH_assert(gridBox == maskBox);

    List<P>& maskParticles = (*m_maskParticles[a_level])[din].listItems();
    const List<P>& particles = (*m_particles[a_level])[din].listItems();

    for (ListIterator<P> lit(particles); lit.ok(); ++lit) {
      const RealVect x = lit().position();
      const IntVect iv = IntVect(D_DECL(std::floor((x[0] - m_probLo[0]) / dx[0]),
                                        std::floor((x[1] - m_probLo[1]) / dx[1]),
                                        std::floor((x[2] - m_probLo[2]) / dx[2])));
      if (!(maskBox.contains(iv))) {
        std::cout << a_level << "\t" << iv << "\t" << x << "\t" << maskBox << std::endl;

        MayDay::Error("ParticleContainer::copyMaskParticles - logic bust. Particle has fallen off the grid");
      }
      else if (mask(iv)) {
        maskParticles.add(lit());
      }
    }
  }
}
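
// Mask sketch: a typical use case is grabbing the coarse-side "halo" particles near
// the refinement boundary before a deposition step. Assuming a boolean mask which is
// true in the halo cells (set up elsewhere; the name is hypothetical):
//
//   particles.copyMaskParticles(haloMask);  // copies into getMaskParticles()
//   // ... deposit/process the mask particles ...
//   particles.clearMaskParticles();         // discard the copies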

template <class P>
void
ParticleContainer<P>::transferMaskParticles(const Vector<RefCountedPtr<LevelData<BaseFab<bool>>>>& a_mask)
{
  CH_TIME("ParticleContainer::transferMaskParticles(amr)");

  CH_assert(m_isDefined);

  for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
    if (!a_mask[lvl].isNull()) {
      this->transferMaskParticles(lvl, *a_mask[lvl]);
    }
  }
}

template <class P>
void
ParticleContainer<P>::transferMaskParticles(const int a_level, const LevelData<BaseFab<bool>>& a_mask)
{
  CH_TIME("ParticleContainer::transferMaskParticles(level)");
  if (m_verbose) {
    pout() << "ParticleContainer::transferMaskParticles(level)" << endl;
  }

  CH_assert(m_isDefined);

  const RealVect dx = m_dx[a_level];

  // Transfer particles from m_particles to m_maskParticles if they lie within the input mask.
  const DisjointBoxLayout& dbl = m_grids[a_level];
  const DataIterator& dit = dbl.dataIterator();

  const int nbox = dit.size();

#pragma omp parallel for schedule(runtime)
  for (int mybox = 0; mybox < nbox; mybox++) {
    const DataIndex& din = dit[mybox];

    const BaseFab<bool>& mask = a_mask[din];
    const Box gridBox = m_grids[a_level][din];
    const Box maskBox = mask.box();

    CH_assert(gridBox == maskBox);

    // Note: the source list is mutated by List<P>::transfer, so it is deliberately non-const here.
    List<P>& maskParticles = (*m_maskParticles[a_level])[din].listItems();
    List<P>& particles = (*m_particles[a_level])[din].listItems();

    for (ListIterator<P> lit(particles); lit.ok();) {
      const RealVect x = lit().position();
      const IntVect iv = IntVect(D_DECL(std::floor((x[0] - m_probLo[0]) / dx[0]),
                                        std::floor((x[1] - m_probLo[1]) / dx[1]),
                                        std::floor((x[2] - m_probLo[2]) / dx[2])));
      if (!(maskBox.contains(iv))) {
        MayDay::Warning("ParticleContainer::transferMaskParticles - logic bust. Particle has fallen off the grid");

        ++lit;
      }
      else {
        if (mask(iv)) {
          maskParticles.transfer(lit);
        }
        else {
          ++lit;
        }
      }
    }
  }
}

template <class P>
void
ParticleContainer<P>::clearParticles()
{
  CH_assert(m_isDefined);

  this->clear(m_particles);
}

template <class P>
void
ParticleContainer<P>::clearBufferParticles() const
{
  CH_assert(m_isDefined);

  this->clear(m_bufferParticles);
}

template <class P>
void
ParticleContainer<P>::clearMaskParticles() const
{
  CH_assert(m_isDefined);

  this->clear(m_maskParticles);
}

template <class P>
void
ParticleContainer<P>::clearOutcast() noexcept
{
  CH_assert(m_isDefined);

  for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
    ParticleData<P>& particleData = *m_particles[lvl];

    List<P>& outcast = particleData.outcast();

    outcast.clear();
  }
}

template <class P>
void
ParticleContainer<P>::clear(AMRParticles<P>& a_particles) const
{
  if (m_verbose) {
    pout() << "ParticleContainer::clear(AMRParticles)" << endl;
  }

  CH_assert(m_isDefined);

  for (int lvl = 0; lvl <= m_finestLevel; lvl++) {

    ParticleData<P>& levelParticles = *a_particles[lvl];

    // Clear the patch particles.
    const BoxLayout& grids = levelParticles.getBoxes();
    const DataIterator& dit = grids.dataIterator();

    const int nbox = dit.size();

#pragma omp parallel for schedule(runtime)
    for (int mybox = 0; mybox < nbox; mybox++) {
      const DataIndex din = dit[mybox];

      List<P>& patchParticles = levelParticles[din].listItems();

      patchParticles.clear();
    }

    // Clear the outcast list.
    List<P>& outcast = levelParticles.outcast();
    outcast.clear();
  }
}

#include <CD_NamespaceFooter.H>

#endif