CD_ParticleContainerImplem.H
/* chombo-discharge
 * Copyright © 2021 SINTEF Energy Research.
 * Please refer to Copyright.txt and LICENSE in the chombo-discharge root directory.
 */

/*!
  @file   CD_ParticleContainerImplem.H
  @brief  Implementation of CD_ParticleContainer.H
  @author Robert Marskar
*/

#ifndef CD_ParticleContainerImplem_H
#define CD_ParticleContainerImplem_H

// Std includes
#include <bitset>

// Chombo includes
#include <ParmParse.H>
#include <CH_Timer.H>
#include <MPI_util.H>
#include <ParticleDataI.H>

// Our includes
#include <CD_ParticleContainer.H>
#include <CD_ParallelOps.H>
#include <CD_ParticleOps.H>
#include <CD_Timer.H>
#include <CD_BoxLoops.H>
#include <CD_OpenMP.H>
#include <CD_NamespaceHeader.H>

template <class P>
ParticleContainer<P>::ParticleContainer()
{
  m_isDefined         = false;
  m_isOrganizedByCell = false;
  m_profile           = false;
  m_debug             = false;
  m_verbose           = false;
}

template <class P>
ParticleContainer<P>::ParticleContainer(const Vector<DisjointBoxLayout>&         a_grids,
                                        const Vector<ProblemDomain>&             a_domains,
                                        const Vector<Real>&                      a_dx,
                                        const Vector<int>&                       a_refRat,
                                        const Vector<ValidMask>&                 a_validMask,
                                        const Vector<RefCountedPtr<LevelTiles>>& a_levelTiles,
                                        const RealVect&                          a_probLo,
                                        const int                                a_blockingFactor,
                                        const int                                a_finestLevel,
                                        const std::string                        a_realm)
{
  CH_TIME("ParticleContainer::ParticleContainer");

  this->define(a_grids,
               a_domains,
               a_dx,
               a_refRat,
               a_validMask,
               a_levelTiles,
               a_probLo,
               a_blockingFactor,
               a_finestLevel,
               a_realm);
}

template <class P>
ParticleContainer<P>::~ParticleContainer()
{
  CH_TIME("ParticleContainer::~ParticleContainer");
}

template <class P>
void
ParticleContainer<P>::define(const Vector<DisjointBoxLayout>&         a_grids,
                             const Vector<ProblemDomain>&             a_domains,
                             const Vector<Real>&                      a_dx,
                             const Vector<int>&                       a_refRat,
                             const Vector<ValidMask>&                 a_validMask,
                             const Vector<RefCountedPtr<LevelTiles>>& a_levelTiles,
                             const RealVect&                          a_probLo,
                             const int                                a_blockingFactor,
                             const int                                a_finestLevel,
                             const std::string                        a_realm)
{
  CH_TIME("ParticleContainer::define");

  m_grids          = a_grids;
  m_domains        = a_domains;
  m_refRat         = a_refRat;
  m_validRegion    = a_validMask;
  m_probLo         = a_probLo;
  m_finestLevel    = a_finestLevel;
  m_realm          = a_realm;
  m_blockingFactor = a_blockingFactor;
  m_levelTiles     = a_levelTiles;

  m_dx.resize(1 + m_finestLevel);
  for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
    m_dx[lvl] = a_dx[lvl] * RealVect::Unit;
  }

  constexpr int base = 0;

  // Do the define stuff.
  this->setupGrownGrids(base, m_finestLevel);
  this->setupParticleData(base, m_finestLevel);

  m_isDefined         = true;
  m_isOrganizedByCell = false;
  m_profile           = false;

  ParmParse pp("ParticleContainer");
  pp.query("profile", m_profile);
  pp.query("debug", m_debug);
  pp.query("verbose", m_verbose);
}
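
/* Usage sketch (illustrative only, not part of the source): the container is
 * typically defined from grid structures produced by the application's AMR
 * setup. All names below (grids, domains, dx, refRat, validMask, levelTiles,
 * probLo, finestLevel, and the particle type MyParticle) are hypothetical.
 *
 *   ParticleContainer<MyParticle> particles;
 *
 *   particles.define(grids, domains, dx, refRat, validMask, levelTiles,
 *                    probLo, 16, finestLevel, "primal");
 *
 * After define(), the container is empty; particles enter through
 * addParticles(...) and are kept consistent with the grids through remap().
 */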

template <class P>
void
ParticleContainer<P>::setupGrownGrids(const int a_base, const int a_finestLevel)
{
  CH_TIME("ParticleContainer::setupGrownGrids");
  if (m_verbose) {
    pout() << "ParticleContainer::setupGrownGrids" << endl;
  }

  // TLDR: This routine sets up the buffer grids which can be used when we need to fetch particles that fall off each level's grid. This is very useful
  //       when we want to fetch particles that lie on the coarse side of a refinement boundary.

  m_grownGrids.resize(1 + a_finestLevel);

  for (int lvl = 0; lvl <= a_finestLevel; lvl++) {
    const DisjointBoxLayout& dbl    = m_grids[lvl];
    const ProblemDomain&     domain = m_domains[lvl];

    // Grow boxes by the refinement factor on the finer levels.
    Vector<Box> boxes = dbl.boxArray();
    if (lvl > 0) {
      for (auto& box : boxes.stdVector()) {
        box.grow(m_refRat[lvl - 1]);
        box &= domain;
      }
    }

    m_grownGrids[lvl] = BoxLayout(boxes, dbl.procIDs());
  }
}
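
/* Worked example of the grow-and-clip step above (2D, hypothetical numbers):
 * with a refinement ratio of 4 between levels 0 and 1, a level-1 box
 * [ (16,16), (31,31) ] is grown to [ (12,12), (35,35) ] and then clipped
 * against the level-1 problem domain:
 *
 *   Box box(IntVect(16, 16), IntVect(31, 31));
 *   box.grow(4);                   // -> [ (12,12), (35,35) ]
 *   box &= ProblemDomain(Box(IntVect::Zero, 63 * IntVect::Unit));
 *
 * Particles up to four fine cells outside the box -- i.e. one refined coarse
 * cell -- are therefore still caught by the grown layout.
 */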

template <class P>
void
ParticleContainer<P>::setupParticleData(const int a_base, const int a_finestLevel)
{
  CH_TIME("ParticleContainer::setupParticleData");
  if (m_verbose) {
    pout() << "ParticleContainer::setupParticleData" << endl;
  }

  // TLDR: This sets up the most commonly used particle data holders for this particle AMR container. This means that
  //       we allocate:
  //
  //       1. The "normal" particle container data holder m_particles. This is defined on the DisjointBoxLayout
  //          which is the natural place for the particles to live.
  //
  //       2. A buffer particle data holder which is defined on a grown DisjointBoxLayout. This is useful when particles
  //          on the coarse level need to deposit to the fine level.
  //
  //       3. A data holder for "masked particles", providing an opportunity to copy/transfer some of the particles on a grid
  //          level to a separate data holder if they lie within a "mask". Typically used for extracting coarse-level particles
  //          that live just outside the fine grid (i.e. on the coarse side of the refinement boundary).
  //
  //       4. A data holder for storing cell-sorted particles. Very useful when particles need to be sorted by cell rather than patch.

  m_particles.resize(1 + a_finestLevel);
  m_bufferParticles.resize(1 + a_finestLevel);
  m_maskParticles.resize(1 + a_finestLevel);
  m_cellSortedParticles.resize(1 + a_finestLevel);

  for (int lvl = a_base; lvl <= a_finestLevel; lvl++) {
    m_particles[lvl] = RefCountedPtr<ParticleData<P>>(
      new ParticleData<P>(m_grids[lvl], m_domains[lvl], m_blockingFactor, m_dx[lvl], m_probLo));

    m_bufferParticles[lvl] = RefCountedPtr<ParticleData<P>>(
      new ParticleData<P>(m_grownGrids[lvl], m_domains[lvl], m_blockingFactor, m_dx[lvl], m_probLo));

    m_maskParticles[lvl] = RefCountedPtr<ParticleData<P>>(
      new ParticleData<P>(m_grids[lvl], m_domains[lvl], m_blockingFactor, m_dx[lvl], m_probLo));

    m_cellSortedParticles[lvl] = RefCountedPtr<LayoutData<BinFab<P>>>(new LayoutData<BinFab<P>>(m_grids[lvl]));
  }
}

template <class P>
void
ParticleContainer<P>::sortParticles() noexcept
{
  CH_TIME("ParticleContainer::sortParticles");
  if (m_verbose) {
    pout() << "ParticleContainer::sortParticles" << endl;
  }

  for (int lvl = 0; lvl <= m_finestLevel; lvl++) {

    const DisjointBoxLayout& dbl = m_grids[lvl];
    const DataIterator&      dit = dbl.dataIterator();

    const int nbox = dit.size();

#pragma omp parallel for schedule(runtime)
    for (int mybox = 0; mybox < nbox; mybox++) {
      const DataIndex& din = dit[mybox];

      List<P>& particles = (*m_particles[lvl])[din].listItems();

      particles.sort();
    }
  }
}
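
/* sortParticles() relies on the particle class P providing operator<. A
 * minimal sketch of a compatible particle type (hypothetical, for
 * illustration only):
 *
 *   struct MyParticle
 *   {
 *     RealVect m_position;
 *
 *     bool operator<(const MyParticle& a_other) const
 *     {
 *       // Any strict weak ordering works; here we order along the first axis.
 *       return m_position[0] < a_other.m_position[0];
 *     }
 *   };
 */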

template <class P>
bool
ParticleContainer<P>::isOrganizedByCell() const
{
  return m_isOrganizedByCell;
}

template <class P>
int
ParticleContainer<P>::getFinestLevel() const
{
  CH_assert(m_isDefined);

  return m_finestLevel;
}

template <class P>
const std::string
ParticleContainer<P>::getRealm() const
{
  CH_assert(m_isDefined);

  return m_realm;
}

template <class P>
const Vector<DisjointBoxLayout>&
ParticleContainer<P>::getGrids() const
{
  return m_grids;
}

template <class P>
const RealVect
ParticleContainer<P>::getProbLo() const noexcept
{
  return m_probLo;
}

template <class P>
const Vector<RealVect>
ParticleContainer<P>::getDx() const noexcept
{
  return m_dx;
}

template <class P>
AMRParticles<P>&
ParticleContainer<P>::getParticles()
{
  CH_assert(m_isDefined);

  if (m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::getParticles - particles are sorted by cell!");
  }

  return m_particles;
}

template <class P>
const AMRParticles<P>&
ParticleContainer<P>::getParticles() const
{
  CH_assert(m_isDefined);

  if (m_isOrganizedByCell) {
    MayDay::Abort("ParticleContainer::getParticles - particles are sorted by cell!");
  }

  return m_particles;
}

template <class P>
AMRParticles<P>&
ParticleContainer<P>::getBufferParticles()
{
  CH_assert(m_isDefined);

  return m_bufferParticles;
}

template <class P>
const AMRParticles<P>&
ParticleContainer<P>::getBufferParticles() const
{
  CH_assert(m_isDefined);

  return m_bufferParticles;
}

template <class P>
AMRParticles<P>&
ParticleContainer<P>::getMaskParticles()
{
  CH_assert(m_isDefined);

  return m_maskParticles;
}

template <class P>
const AMRParticles<P>&
ParticleContainer<P>::getMaskParticles() const
{
  CH_assert(m_isDefined);

  return m_maskParticles;
}

template <class P>
ParticleData<P>&
ParticleContainer<P>::operator[](const int a_lvl)
{
  CH_assert(m_isDefined);

  if (m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::operator[](a_lvl) - particles are sorted by cell!");
  }

  return *m_particles[a_lvl];
}

template <class P>
const ParticleData<P>&
ParticleContainer<P>::operator[](const int a_level) const
{
  CH_assert(m_isDefined);

  if (m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::operator[](a_lvl) - particles are sorted by cell!");
  }

  return *m_particles[a_level];
}

template <class P>
AMRCellParticles<P>&
ParticleContainer<P>::getCellParticles()
{
  CH_assert(m_isDefined);

  if (!m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::getCellParticles() - particles are not sorted by cell!");
  }

  return m_cellSortedParticles;
}

template <class P>
const AMRCellParticles<P>&
ParticleContainer<P>::getCellParticles() const
{
  CH_assert(m_isDefined);

  if (!m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::getCellParticles() - particles are not sorted by cell!");
  }

  return m_cellSortedParticles;
}

template <class P>
LayoutData<BinFab<P>>&
ParticleContainer<P>::getCellParticles(const int a_level)
{
  CH_assert(m_isDefined);

  if (!m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::getCellParticles(level) - particles are not sorted by cell!");
  }

  return *m_cellSortedParticles[a_level];
}

template <class P>
const LayoutData<BinFab<P>>&
ParticleContainer<P>::getCellParticles(const int a_level) const
{
  CH_TIME("ParticleContainer::getCellParticles(int)");

  CH_assert(m_isDefined);

  if (!m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::getCellParticles(level) - particles are not sorted by cell!");
  }

  return *m_cellSortedParticles[a_level];
}

template <class P>
void
ParticleContainer<P>::getCellParticles(BinFab<P>& cellParticles, const int a_lvl, const DataIndex a_dit) const
{
  CH_TIME("ParticleContainer::getCellParticles(BinFab)");

  CH_assert(m_isDefined);

  if (!m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::getCellParticles - particles are not sorted by cell!");
  }

  cellParticles.define(m_grids[a_lvl][a_dit], m_dx[a_lvl], m_probLo);
  cellParticles.addItems((*m_particles[a_lvl])[a_dit].listItems());
}

template <class P>
void
ParticleContainer<P>::getCellParticlesDestructive(BinFab<P>& cellParticles, const int a_lvl, const DataIndex a_dit)
{
  CH_TIME("ParticleContainer::getCellParticlesDestructive");

  CH_assert(m_isDefined);

  if (!m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::getCellParticlesDestructive - particles are not sorted by cell!");
  }

  cellParticles.define(m_grids[a_lvl].get(a_dit), m_dx[a_lvl], m_probLo);
  cellParticles.addItemsDestructive((*m_particles[a_lvl])[a_dit].listItems());
}

template <class P>
BinFab<P>&
ParticleContainer<P>::getCellParticles(const int a_level, const DataIndex a_dit)
{
  CH_TIME("ParticleContainer::getCellParticles(int, DataIndex)");

  CH_assert(m_isDefined);

  if (!m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::getCellParticles(int, dit) - particles are not sorted by cell!");
  }

  return (*m_cellSortedParticles[a_level])[a_dit];
}

template <class P>
const BinFab<P>&
ParticleContainer<P>::getCellParticles(const int a_level, const DataIndex a_dit) const
{
  CH_TIME("ParticleContainer::getCellParticles(int, DataIndex)");

  CH_assert(m_isDefined);

  if (!m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::getCellParticles(int, dit) - particles are not sorted by cell!");
  }

  return (*m_cellSortedParticles[a_level])[a_dit];
}

template <class P>
void
ParticleContainer<P>::organizeParticlesByCell()
{
  CH_TIME("ParticleContainer::organizeParticlesByCell");
  if (m_verbose) {
    pout() << "ParticleContainer::organizeParticlesByCell" << endl;
  }

  CH_assert(m_isDefined);

  if (!m_isOrganizedByCell) {

    for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
      const DisjointBoxLayout& dbl = m_grids[lvl];
      const DataIterator&      dit = dbl.dataIterator();

      const int nbox = dit.size();

#pragma omp parallel for schedule(runtime)
      for (int mybox = 0; mybox < nbox; mybox++) {
        const DataIndex& din = dit[mybox];

        BinFab<P>& cellParticles = (*m_cellSortedParticles[lvl])[din];

        cellParticles.define(dbl[din], m_dx[lvl], m_probLo);
        cellParticles.addItemsDestructive((*m_particles[lvl])[din].listItems());
      }
    }

    m_isOrganizedByCell = true;
  }
}

template <class P>
void
ParticleContainer<P>::organizeParticlesByPatch()
{
  CH_TIME("ParticleContainer::organizeParticlesByPatch");
  if (m_verbose) {
    pout() << "ParticleContainer::organizeParticlesByPatch" << endl;
  }

  CH_assert(m_isDefined);

  if (m_isOrganizedByCell) {

    constexpr int comp = 0;

    for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
      const DisjointBoxLayout& dbl = m_grids[lvl];
      const DataIterator&      dit = dbl.dataIterator();

      const int nbox = dit.size();

#pragma omp parallel for schedule(runtime)
      for (int mybox = 0; mybox < nbox; mybox++) {
        const DataIndex& din = dit[mybox];

        ListBox<P>& patchParticles = (*m_particles[lvl])[din];
        BinFab<P>&  cellParticles  = (*m_cellSortedParticles[lvl])[din];

        // Kernel which moves particles from the cell container to the patch container.
        auto kernel = [&](const IntVect& iv) -> void {
          patchParticles.addItemsDestructive(cellParticles(iv, comp));
        };

        BoxLoops::loop(dbl[din], kernel);

        cellParticles.clear();
      }
    }

    m_isOrganizedByCell = false;
  }
}
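
/* Usage sketch: cell sorting is a temporary view of the particles. A typical
 * round trip (container name hypothetical):
 *
 *   particles.organizeParticlesByCell();
 *
 *   // ... per-cell work, e.g. merging/splitting particles in each BinFab ...
 *
 *   particles.organizeParticlesByPatch();   // restore the patch-sorted view
 *
 * Most other member functions (addParticles, remap, etc.) require the
 * patch-sorted view and issue a run-time error while the container is
 * organized by cell.
 */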

template <class P>
void
ParticleContainer<P>::addParticles(const List<P>& a_particles)
{
  CH_TIME("ParticleContainer::addParticles");
  if (m_verbose) {
    pout() << "ParticleContainer::addParticles" << endl;
  }

  CH_assert(m_isDefined);

  if (m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::addParticles(List<P>) - particles are sorted by cell!");
  }

  // TLDR: This routine is almost a precise copy of the remap routine, with the exception that the particles to be remapped
  //       are only the input particles rather than everything inside m_particles.

  const uint32_t numRanks = numProc();
  const uint32_t myRank   = procID();

  std::vector<ParticleMap<List<P>>> particlesToSend(numRanks);

#pragma omp parallel
  {
    List<P> outcasts;

#pragma omp master
    {
      outcasts.join(a_particles);
    }

    std::vector<ParticleMap<List<P>>> threadLocalParticlesToSend(numRanks);

    // Collect particles owned by this rank/thread.
    this->transferParticlesToSingleList(outcasts, m_particles);

    // Map the particles to other grid patches
    this->mapParticlesToAMRGrid(threadLocalParticlesToSend, outcasts);

    // Catenate the thread-local particle lists onto particlesToSend
#pragma omp critical
    this->catenateParticleMaps(particlesToSend, threadLocalParticlesToSend);
  }

  // Particles that go from this rank to this rank don't need to be communicated, so we can place them directly on the correct patch.
  this->assignLocalParticles(particlesToSend[myRank], m_particles);

  // If using MPI then we have to scatter the particles across MPI ranks.
#ifdef CH_MPI
  ParticleMap<List<P>> receivedParticles;

  ParticleOps::scatterParticles(receivedParticles, particlesToSend);

  // Assign particles to the correct level and grid patch -- we iterate through receivedParticles and decode the information
  // we got from there.
  this->assignLocalParticles(receivedParticles, m_particles);
#endif

  if (m_debug) {
    this->sanityCheck();
  }
}
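
/* Usage sketch (names hypothetical): new particles can be handed to the
 * container as a flat list. addParticles() copies the input, while
 * addParticlesDestructive() empties the input list instead of copying it.
 *
 *   List<MyParticle> newParticles;
 *
 *   MyParticle p;
 *   p.position() = RealVect::Zero;
 *   newParticles.add(p);
 *
 *   particles.addParticles(newParticles);            // newParticles is preserved
 *   particles.addParticlesDestructive(newParticles); // newParticles is drained
 */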

template <class P>
void
ParticleContainer<P>::addParticlesDestructive(List<P>& a_particles)
{
  CH_TIME("ParticleContainer::addParticlesDestructive");
  if (m_verbose) {
    pout() << "ParticleContainer::addParticlesDestructive" << endl;
  }

  CH_assert(m_isDefined);

  if (m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::addParticlesDestructive(List<P>) - particles are sorted by cell!");
  }

  // TLDR: This routine is almost a precise copy of the remap routine, with the exception that the particles to be remapped
  //       are only the input particles rather than everything inside m_particles.

  const uint32_t numRanks = numProc();
  const uint32_t myRank   = procID();

  std::vector<ParticleMap<List<P>>> particlesToSend(numRanks);

#pragma omp parallel
  {
    List<P> outcasts;

#pragma omp master
    {
      outcasts.catenate(a_particles);
    }

    std::vector<ParticleMap<List<P>>> threadLocalParticlesToSend(numRanks);

    // Collect particles owned by this rank/thread.
    this->transferParticlesToSingleList(outcasts, m_particles);

    // Map the particles to other grid patches
    this->mapParticlesToAMRGrid(threadLocalParticlesToSend, outcasts);

    // Catenate the thread-local particle lists onto particlesToSend
#pragma omp critical
    this->catenateParticleMaps(particlesToSend, threadLocalParticlesToSend);
  }

  // Particles that go from this rank to this rank don't need to be communicated, so we can place them directly on the correct patch.
  this->assignLocalParticles(particlesToSend[myRank], m_particles);

  // If using MPI then we have to scatter the particles across MPI ranks.
#ifdef CH_MPI
  ParticleMap<List<P>> receivedParticles;

  ParticleOps::scatterParticles(receivedParticles, particlesToSend);

  // Assign particles to the correct level and grid patch -- we iterate through receivedParticles and decode the information
  // we got from there.
  this->assignLocalParticles(receivedParticles, m_particles);
#endif

  if (m_debug) {
    this->sanityCheck();
  }
}

template <class P>
void
ParticleContainer<P>::addParticles(const BinFab<P>& a_particles, const int a_lvl, const DataIndex a_dit)
{
  CH_TIME("ParticleContainer::addParticles(BinFab)");
  if (m_verbose) {
    pout() << "ParticleContainer::addParticles(BinFab)" << endl;
  }

  CH_assert(m_isDefined);
  CH_assert(m_grids[a_lvl].get(a_dit) == a_particles.getRegion());

  if (!m_isOrganizedByCell) {
    MayDay::Abort("ParticleContainer::addParticles(BinFab<P>) - particles are not sorted by cell!");
  }

  constexpr int comp = 0;

  BinFab<P>& boxParticles = (*m_cellSortedParticles[a_lvl])[a_dit];

  auto kernel = [&](const IntVect& iv) -> void {
    List<P>&       myParticles    = boxParticles(iv, comp);
    const List<P>& inputParticles = a_particles(iv, comp);

    myParticles.join(inputParticles);
  };

  BoxLoops::loop(m_grids[a_lvl][a_dit], kernel);
}

template <class P>
void
ParticleContainer<P>::addParticlesDestructive(BinFab<P>& a_particles, const int a_lvl, const DataIndex a_dit)
{
  CH_TIME("ParticleContainer::addParticlesDestructive(BinFab)");
  if (m_verbose) {
    pout() << "ParticleContainer::addParticlesDestructive(BinFab)" << endl;
  }

  CH_assert(m_isDefined);
  CH_assert(m_grids[a_lvl].get(a_dit) == a_particles.getRegion());

  if (!m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::addParticlesDestructive(BinFab<P>) - particles are not sorted by cell!");
  }

  constexpr int comp = 0;

  BinFab<P>& boxParticles = (*m_cellSortedParticles[a_lvl])[a_dit];

  auto kernel = [&](const IntVect& iv) -> void {
    List<P>& myParticles    = boxParticles(iv, comp);
    List<P>& inputParticles = a_particles(iv, comp);

    myParticles.catenate(inputParticles);
  };

  BoxLoops::loop(m_grids[a_lvl][a_dit], kernel);
}

template <class P>
void
ParticleContainer<P>::addParticles(const ParticleContainer<P>& a_otherContainer)
{
  CH_TIME("ParticleContainer::addParticles(ParticleContainer)");
  if (m_verbose) {
    pout() << "ParticleContainer::addParticles(ParticleContainer)" << endl;
  }

  CH_assert(m_isDefined);

  if (m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::addParticles(ParticleContainer<P>) - particles are sorted by cell!");
  }

  // TLDR: This routine adds particles from a different container to this container. If the containers live on the same realm
  //       we just add the particles directly. Otherwise, we have to add to the outcast list and remap.

  if (m_realm == a_otherContainer.getRealm()) {
    for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
      const DisjointBoxLayout& dbl = m_grids[lvl];
      const DataIterator&      dit = dbl.dataIterator();

      const int nbox = dit.size();

#pragma omp parallel for schedule(runtime)
      for (int mybox = 0; mybox < nbox; mybox++) {
        const DataIndex& din = dit[mybox];

        ListBox<P>&       myParticles    = (*m_particles[lvl])[din];
        const ListBox<P>& otherParticles = a_otherContainer[lvl][din];

        myParticles.addItems(otherParticles.listItems());
      }
    }
  }
  else {
    // If we're adding particles across realms we need to collect the input particles onto lists that get remapped
    // into m_particles. A full explanation of how this works is given in the remap routine.

    const uint32_t numRanks = numProc();
    const uint32_t myRank   = procID();

    std::vector<ParticleMap<List<P>>> particlesToSend(numRanks);

#pragma omp parallel
    {
      List<P> outcasts;

      std::vector<ParticleMap<List<P>>> threadLocalParticlesToSend(numRanks);

      // Collect particles owned by this rank/thread, but on the other realm.
      a_otherContainer.copyParticlesToSingleList(outcasts, a_otherContainer.getParticles());

      // Map the particles to other grid patches
      this->mapParticlesToAMRGrid(threadLocalParticlesToSend, outcasts);

      // Catenate the thread-local particle lists onto particlesToSend
#pragma omp critical
      this->catenateParticleMaps(particlesToSend, threadLocalParticlesToSend);
    }

    // Particles that go from this rank to this rank don't need to be communicated, so we can place them directly on the correct patch.
    this->assignLocalParticles(particlesToSend[myRank], m_particles);

    // If using MPI then we have to scatter the particles across MPI ranks.
#ifdef CH_MPI
    ParticleMap<List<P>> receivedParticles;

    ParticleOps::scatterParticles(receivedParticles, particlesToSend);

    // Assign particles to the correct level and grid patch -- we iterate through receivedParticles and decode the information
    // we got from there.
    this->assignLocalParticles(receivedParticles, m_particles);
#endif
  }

  if (m_debug) {
    this->sanityCheck();
  }
}

template <class P>
void
ParticleContainer<P>::addParticlesDestructive(ParticleContainer<P>& a_otherContainer)
{
  CH_TIME("ParticleContainer::addParticlesDestructive(ParticleContainer)");
  if (m_verbose) {
    pout() << "ParticleContainer::addParticlesDestructive(ParticleContainer)" << endl;
  }

  CH_assert(m_isDefined);

  if (m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::addParticlesDestructive(ParticleContainer<P>) - particles are sorted by cell!");
  }

  // TLDR: This routine adds particles from a different container to this container. If the containers live on the same realm
  //       we just add the particles directly. Otherwise, we have to add to the outcast list and remap.

  if (m_realm == a_otherContainer.getRealm()) {
    for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
      const DisjointBoxLayout& dbl = m_grids[lvl];
      const DataIterator&      dit = dbl.dataIterator();

      const int nbox = dit.size();

#pragma omp parallel for schedule(runtime)
      for (int mybox = 0; mybox < nbox; mybox++) {
        const DataIndex& din = dit[mybox];

        ListBox<P>& myParticles    = (*m_particles[lvl])[din];
        ListBox<P>& otherParticles = a_otherContainer[lvl][din];

        myParticles.addItems(otherParticles.listItems());
      }
    }
  }
  else {
    // If we're adding particles across realms we need to collect the input particles onto lists that get remapped
    // into m_particles. A full explanation of how this works is given in the remap routine.

    const uint32_t numRanks = numProc();
    const uint32_t myRank   = procID();

    std::vector<ParticleMap<List<P>>> particlesToSend(numRanks);

#pragma omp parallel
    {
      List<P> outcasts;

      std::vector<ParticleMap<List<P>>> threadLocalParticlesToSend(numRanks);

      // Collect particles owned by this rank/thread, but on the other realm.
      a_otherContainer.transferParticlesToSingleList(outcasts, a_otherContainer.getParticles());

      // Map the particles to other grid patches
      this->mapParticlesToAMRGrid(threadLocalParticlesToSend, outcasts);

      // Catenate the thread-local particle lists onto particlesToSend
#pragma omp critical
      this->catenateParticleMaps(particlesToSend, threadLocalParticlesToSend);
    }

    // Particles that go from this rank to this rank don't need to be communicated, so we can place them directly on the correct patch.
    this->assignLocalParticles(particlesToSend[myRank], m_particles);

    // If using MPI then we have to scatter the particles across MPI ranks.
#ifdef CH_MPI
    ParticleMap<List<P>> receivedParticles;

    ParticleOps::scatterParticles(receivedParticles, particlesToSend);

    // Assign particles to the correct level and grid patch -- we iterate through receivedParticles and decode the information
    // we got from there.
    this->assignLocalParticles(receivedParticles, m_particles);
#endif
  }

  if (m_debug) {
    this->sanityCheck();
  }
}

template <class P>
void
ParticleContainer<P>::transferParticles(ParticleContainer<P>& a_otherContainer)
{
  CH_TIME("ParticleContainer::transferParticles");

  CH_assert(m_isDefined);
  CH_assert(this->getRealm() == a_otherContainer.getRealm());

  if (m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::transferParticles(ParticleContainer<P>) - particles are sorted by cell!");
  }

  if (a_otherContainer.isOrganizedByCell()) {
    MayDay::Error("ParticleContainer::transferParticles(ParticleContainer<P>) - other particles are sorted by cell!");
  }

  if (a_otherContainer.getRealm() != m_realm) {
    MayDay::Error(
      "ParticleContainer::transferParticles(ParticleContainer<P>) - other container defined over a different realm");
  }

  this->transferParticles(a_otherContainer.getParticles());
}

template <class P>
void
ParticleContainer<P>::transferParticles(AMRParticles<P>& a_particles)
{
  CH_TIME("ParticleContainer::transferParticles(AMRParticles)");
  if (m_verbose) {
    pout() << "ParticleContainer::transferParticles(AMRParticles)" << endl;
  }

  CH_assert(m_isDefined);

  if (m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::transferParticles(ParticleContainer<P>) - particles are sorted by cell!");
  }

  // Ok, transfer the particles.
  for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
    const DisjointBoxLayout& dbl = m_grids[lvl];
    const DataIterator&      dit = dbl.dataIterator();

    const int nbox = dit.size();

#pragma omp parallel for schedule(runtime)
    for (int mybox = 0; mybox < nbox; mybox++) {
      const DataIndex& din = dit[mybox];

      List<P>& myParticles    = (*m_particles[lvl])[din].listItems();
      List<P>& otherParticles = (*a_particles[lvl])[din].listItems();

      myParticles.catenate(otherParticles);
    }
  }
}

template <class P>
void
ParticleContainer<P>::remap()
{
  CH_TIME("ParticleContainer<P>::remap");
  if (m_verbose) {
    pout() << "ParticleContainer::remap" << endl;
  }

  // TLDR: This routine is quite long but does the full remapping on the whole hierarchy. It will discard particles that fall off the grid.
  //
  //       This is done in the following steps:
  //       1) Collect all particles from this rank onto thread-local variables.
  //       2) Iterate through those particles and figure out where they end up in the AMR hierarchy (level, grid index, and ownership, i.e. the MPI rank).
  //       3) Collect those particles onto a rank-only (not thread-local) variable.
  //       4) Assign particles _locally_, i.e. assign particles sent from this rank to this rank directly onto m_particles.
  //       5) If using MPI, scatter the particles to the appropriate ranks.
  //       6) Assign particles locally from the particles that were scattered to this rank.

  const uint32_t numRanks = numProc();
  const uint32_t myRank   = procID();

  // These are the particles that get sent to each MPI rank. The pair contains the grid level and grid index.
  // Each MPI rank (and OpenMP thread) goes through its particles and checks if they've fallen off the grid. If they have,
  // then the particles are moved onto appropriate lists that will get sent to each rank.
  std::vector<ParticleMap<List<P>>> particlesToSend(numRanks);

#pragma omp parallel
  {
    List<P> outcasts;

    std::vector<ParticleMap<List<P>>> threadLocalParticlesToSend(numRanks);

    // Collect particles owned by this rank/thread.
    this->transferParticlesToSingleList(outcasts, m_particles);

    // Map the particles to other grid patches
    this->mapParticlesToAMRGrid(threadLocalParticlesToSend, outcasts);

    // Catenate the thread-local particle lists onto particlesToSend
#pragma omp critical
    this->catenateParticleMaps(particlesToSend, threadLocalParticlesToSend);
  }

  // Particles that go from this rank to this rank don't need to be communicated, so we can place them directly on the correct patch.
  this->assignLocalParticles(particlesToSend[myRank], m_particles);

  // If using MPI then we have to scatter the particles across MPI ranks.
#ifdef CH_MPI
  ParticleMap<List<P>> receivedParticles;

  ParticleOps::scatterParticles(receivedParticles, particlesToSend);

  // Assign particles to the correct level and grid patch -- we iterate through receivedParticles and decode the information
  // we got from there.
  this->assignLocalParticles(receivedParticles, m_particles);
#endif

  if (m_debug) {
    this->sanityCheck();
  }
}
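
/* Usage sketch (names hypothetical): remap() is what one calls after moving
 * particles, e.g. in a transport step, so that particles which crossed patch
 * or level boundaries are reassigned to the correct patch/level/rank:
 *
 *   List<MyParticle>& patchParticles = particles[lvl][din].listItems();
 *
 *   for (ListIterator<MyParticle> lit(patchParticles); lit.ok(); ++lit) {
 *     lit().position() += dt * lit().velocity();
 *   }
 *
 *   // ... after updating all patches on all levels:
 *   particles.remap();
 */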

template <typename P>
void
ParticleContainer<P>::sanityCheck() const noexcept
{
  CH_TIME("ParticleContainer::sanityCheck");

  for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
    const DisjointBoxLayout& dbl = m_grids[lvl];
    const RealVect           dx  = m_dx[lvl];

    for (DataIterator dit(dbl); dit.ok(); ++dit) {
      const List<P>& particles = (*m_particles[lvl])[dit()].listItems();
      const Box      box       = dbl[dit()];

      for (ListIterator<P> lit(particles); lit.ok(); ++lit) {
        const RealVect particlePosition = lit().position();
        const IntVect  particleCell     = ParticleOps::getParticleCellIndex(particlePosition, m_probLo, dx);

        if (!(box.contains(particleCell))) {
          MayDay::Error("ParticleContainer::sanityCheck -- particle is not contained in grid box!");
        }
      }
    }
  }
}

template <typename P>
inline void
ParticleContainer<P>::transferParticlesToSingleList(List<P>& a_list, AMRParticles<P>& a_particles) const noexcept
{
  CH_TIME("ParticleContainer::transferParticlesToSingleList");

  for (int lvl = 0; lvl < a_particles.size(); lvl++) {
    const BoxLayout&    dbl = a_particles[lvl]->getBoxes();
    const DataIterator& dit = dbl.dataIterator();

    const int nbox = dit.size();

#pragma omp for schedule(runtime)
    for (int mybox = 0; mybox < nbox; mybox++) {
      const DataIndex& din = dit[mybox];

      List<P>& particles = (*a_particles[lvl])[din].listItems();

      a_list.catenate(particles);
    }
  }
}

template <typename P>
inline void
ParticleContainer<P>::copyParticlesToSingleList(List<P>& a_list, const AMRParticles<P>& a_particles) const noexcept
{
  CH_TIME("ParticleContainer::copyParticlesToSingleList");

  for (int lvl = 0; lvl < a_particles.size(); lvl++) {
    const BoxLayout&    dbl = a_particles[lvl]->getBoxes();
    const DataIterator& dit = dbl.dataIterator();

    const int nbox = dit.size();

#pragma omp for schedule(runtime)
    for (int mybox = 0; mybox < nbox; mybox++) {
      const DataIndex& din = dit[mybox];

      const List<P>& particles = (*a_particles[lvl])[din].listItems();

      a_list.join(particles);
    }
  }
}

template <typename P>
inline void
ParticleContainer<P>::mapParticlesToAMRGrid(std::vector<ParticleMap<List<P>>>& a_mappedParticles,
                                            List<P>&                           a_unmappedParticles) const noexcept
{
  CH_TIME("ParticleContainer::mapParticlesToAMRGrid");

  const uint32_t numRanks = numProc();
  const uint32_t myRank   = procID();

  std::vector<RealVect> quasiDx(1 + m_finestLevel);

  for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
    quasiDx[lvl] = m_blockingFactor * m_dx[lvl];
  }

  // Starting on the finest level -- figure out where the particles wound up. We are going from finest to coarsest here because we want
  // the particle to end up on the finest grid level that contains it.
  for (ListIterator<P> lit(a_unmappedParticles); lit.ok();) {
    bool foundTile = false;

    for (int lvl = m_finestLevel; lvl >= 0 && !foundTile; lvl--) {
      const IntVect particleTile = locateBin(lit().position(), quasiDx[lvl], m_probLo);

      const auto& myLevelTiles    = m_levelTiles[lvl]->getMyTiles();
      const auto& otherLevelTiles = m_levelTiles[lvl]->getOtherTiles();

      if (myLevelTiles.find(particleTile) != myLevelTiles.end()) {
        // Found the particle on this level and this rank is the one who owns it.
        const uint32_t gridIndex = myLevelTiles.at(particleTile);
        const uint32_t toRank    = myRank;

        a_mappedParticles[toRank][std::make_pair(lvl, gridIndex)].transfer(lit);

        foundTile = true;
      }
#ifdef CH_MPI
      else if (otherLevelTiles.find(particleTile) != otherLevelTiles.end()) {
        // Particle was found on this level, but not on this rank.
        const uint32_t& gridIndex = otherLevelTiles.at(particleTile).first;
        const uint32_t& toRank    = otherLevelTiles.at(particleTile).second;

        a_mappedParticles[toRank][std::make_pair(lvl, gridIndex)].transfer(lit);

        foundTile = true;
      }
#endif
    }

    // If this triggers, the particle fell off the domain; just move on to the next one.
    if (!foundTile) {
      ++lit;
    }
  }

  // The remaining particles fell off the domain and have to go.
  a_unmappedParticles.clear();
}
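
/* Worked example of the tile computation above (hypothetical numbers): the
 * tile index is the particle position quantized to blocks of m_blockingFactor
 * cells. With probLo = (0,0), dx = 1/64 and a blocking factor of 16,
 *
 *   quasiDx = 16 * (1/64) = 0.25,
 *
 * so a particle at x = (0.30, 0.70) lands in tile (1, 2) on that level, since
 * floor(0.30/0.25) = 1 and floor(0.70/0.25) = 2. The LevelTiles object then
 * tells us which rank and grid index own that tile.
 */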

template <typename P>
inline void
ParticleContainer<P>::catenateParticleMaps(std::vector<ParticleMap<List<P>>>& a_globalParticles,
                                           std::vector<ParticleMap<List<P>>>& a_localParticles) const noexcept
{
  CH_TIME("ParticleContainer::catenateParticleMaps");

  const uint32_t numRanks = numProc();

  for (int irank = 0; irank < numRanks; irank++) {
    ParticleMap<List<P>>& globalParticles = a_globalParticles[irank];

    for (auto& m : a_localParticles[irank]) {
      globalParticles[m.first].catenate(m.second);
    }
  }
}

template <typename P>
inline void
ParticleContainer<P>::assignLocalParticles(ParticleMap<List<P>>& a_mappedParticles,
                                           AMRParticles<P>&      a_particleData) const noexcept
{
  CH_TIME("ParticleContainer::assignLocalParticles");

#pragma omp parallel
  {
#pragma omp single
    {
      for (auto mapIter = a_mappedParticles.begin(); mapIter != a_mappedParticles.end(); mapIter++) {
#pragma omp task firstprivate(mapIter)
        {
          const uint32_t  gridLevel = mapIter->first.first;
          const uint32_t  gridIndex = mapIter->first.second;
          const DataIndex din       = m_levelTiles[gridLevel]->getMyGrids().at(gridIndex);

          List<P>& particles   = mapIter->second;
          List<P>& myParticles = (*a_particleData[gridLevel])[din].listItems();

          myParticles.catenate(particles);
        }
      }
    }
  }

  a_mappedParticles.clear();
}

template <class P>
void
ParticleContainer<P>::preRegrid(const int a_base)
{
  CH_TIME("ParticleContainer::preRegrid");
  if (m_verbose) {
    pout() << "ParticleContainer::preRegrid" << endl;
  }

  // TLDR: This routine takes all the particles on each level and puts them in a single list (per processor). After that
  //       we can safely destruct the ParticleData on each level without losing the particles.

  CH_assert(m_isDefined);
  if (m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::preRegrid - particles are sorted by cell!");
  }

  // Fill cache particles on each level
  m_cacheParticles.resize(1 + m_finestLevel);

  for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
    m_cacheParticles[lvl] = RefCountedPtr<ParticleData<P>>(
      new ParticleData<P>(m_grids[lvl], m_domains[lvl], m_blockingFactor, m_dx[lvl], m_probLo));

    const DisjointBoxLayout& dbl = m_grids[lvl];
    const DataIterator&      dit = dbl.dataIterator();

    const int nbox = dit.size();

#pragma omp parallel for schedule(runtime)
    for (int mybox = 0; mybox < nbox; mybox++) {
      const DataIndex& din = dit[mybox];

      List<P>& cacheParticles = (*m_cacheParticles[lvl])[din].listItems();
      List<P>& myParticles    = (*m_particles[lvl])[din].listItems();

      cacheParticles.catenate(myParticles);
    }
  }
}

template <class P>
void
ParticleContainer<P>::resetParticleIDs() noexcept
{
  CH_TIME("ParticleContainer::resetParticleIDs");
  if (m_verbose) {
    pout() << "ParticleContainer::resetParticleIDs" << endl;
  }

  CH_assert(m_isDefined);
  if (m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::resetParticleIDs - particles are sorted by cell!");
  }

  int32_t counter = 0;

  const int32_t myRank = static_cast<int32_t>(procID());

  for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
    const DisjointBoxLayout& dbl = m_grids[lvl];
    const DataIterator&      dit = dbl.dataIterator();

    const int nbox = dit.size();

#pragma omp parallel for schedule(runtime)
    for (int mybox = 0; mybox < nbox; mybox++) {
      const DataIndex& din = dit[mybox];

      List<P>& particles = (*m_particles[lvl])[din].listItems();

      for (ListIterator<P> lit(particles); lit.ok(); ++lit, counter++) {
        lit().rankID()     = myRank;
        lit().particleID() = counter;
      }
    }
  }
}

template <class P>
void
ParticleContainer<P>::regrid(const Vector<DisjointBoxLayout>&         a_grids,
                             const Vector<ProblemDomain>&             a_domains,
                             const Vector<Real>&                      a_dx,
                             const Vector<int>&                       a_refRat,
                             const Vector<ValidMask>&                 a_validMask,
                             const Vector<RefCountedPtr<LevelTiles>>& a_levelTiles,
                             const int                                a_lmin,
                             const int                                a_newFinestLevel)
{
  CH_TIME("ParticleContainer::regrid");
  if (m_verbose) {
    pout() << "ParticleContainer::regrid" << endl;
  }

  // TLDR: This is the main regrid routine for the particle container. It is essentially a ::define() followed by several remapping steps.

  CH_assert(m_isDefined);

  if (m_isOrganizedByCell) {
    MayDay::Error("ParticleContainer::regrid(...) - particles are sorted by cell!");
  }

  // Update this stuff
  m_grids       = a_grids;
  m_domains     = a_domains;
  m_refRat      = a_refRat;
  m_validRegion = a_validMask;
  m_finestLevel = a_newFinestLevel;
  m_levelTiles  = a_levelTiles;

  m_dx.resize(1 + m_finestLevel);
  for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
    m_dx[lvl] = a_dx[lvl] * RealVect::Unit;
  }

  this->setupGrownGrids(a_lmin, m_finestLevel);
  this->setupParticleData(a_lmin, m_finestLevel);

  // Perform the remapping operation.
  const uint32_t numRanks = numProc();
  const uint32_t myRank   = procID();

  // These are the particles that get sent to each MPI rank. The pair contains the grid level and grid index.
  // Each MPI rank (and OpenMP thread) goes through its particles and checks if they've fallen off the grid. If they have,
  // then the particles are moved onto appropriate lists that will get sent to each rank.
  std::vector<ParticleMap<List<P>>> particlesToSend(numRanks);

#pragma omp parallel
  {
    List<P> outcasts;

    std::vector<ParticleMap<List<P>>> threadLocalParticlesToSend(numRanks);

    // Collect particles owned by this rank/thread.
    this->transferParticlesToSingleList(outcasts, m_cacheParticles);

    // Map the particles to other grid patches
    this->mapParticlesToAMRGrid(threadLocalParticlesToSend, outcasts);

    // Catenate the thread-local particle lists onto particlesToSend
#pragma omp critical
    this->catenateParticleMaps(particlesToSend, threadLocalParticlesToSend);
  }

  // Particles that go from this rank to this rank don't need to be communicated, so we can place them directly on the correct patch.
  this->assignLocalParticles(particlesToSend[myRank], m_particles);

  // If using MPI then we have to scatter the particles across MPI ranks.
#ifdef CH_MPI
  ParticleMap<List<P>> receivedParticles;

  ParticleOps::scatterParticles(receivedParticles, particlesToSend);

  // Assign particles to the correct level and grid patch -- we iterate through receivedParticles and decode the information
  // we got from there.
  this->assignLocalParticles(receivedParticles, m_particles);
#endif

  if (m_debug) {
    this->sanityCheck();
  }

  // Discard transient storage.
  m_cacheParticles.resize(0);
}

template <class P>
template <Real& (P::*particleScalarField)()>
void
ParticleContainer<P>::setValue(const Real a_value)
{
  CH_TIME("ParticleContainer::setValue");

  for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
    const DisjointBoxLayout& dbl = m_grids[lvl];
    const DataIterator&      dit = dbl.dataIterator();

    const int nbox = dit.size();

#pragma omp parallel for schedule(runtime)
    for (int mybox = 0; mybox < nbox; mybox++) {
      const DataIndex& din = dit[mybox];

      List<P>& patchParticles = (*m_particles[lvl])[din].listItems();

      for (ListIterator<P> lit(patchParticles); lit.ok(); ++lit) {
        P& p = lit();

        (p.*particleScalarField)() = a_value;
      }
    }
  }
}

template <class P>
template <RealVect& (P::*particleVectorField)()>
void
ParticleContainer<P>::setValue(const RealVect a_value)
{
  CH_TIME("ParticleContainer::setValue");

  for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
    const DisjointBoxLayout& dbl = m_grids[lvl];
    const DataIterator&      dit = dbl.dataIterator();

    const int nbox = dit.size();

#pragma omp parallel for schedule(runtime)
    for (int mybox = 0; mybox < nbox; mybox++) {
      const DataIndex& din = dit[mybox];

      List<P>& patchParticles = (*m_particles[lvl])[din].listItems();

      for (ListIterator<P> lit(patchParticles); lit.ok(); ++lit) {
        P& p = lit();

        (p.*particleVectorField)() = a_value;
      }
    }
  }
}
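
/* Usage sketch for the two setValue templates above (member names
 * hypothetical): the template parameter is a pointer to a member function
 * returning a mutable reference, e.g.
 *
 *   particles.setValue<&MyParticle::weight>(1.0);
 *   particles.setValue<&MyParticle::velocity>(RealVect::Zero);
 *
 * where MyParticle provides Real& weight() and RealVect& velocity().
 */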
1440
1441template <class P>
1442unsigned long long
1444{
1445 CH_TIME("ParticleContainer::getNumberOfValidParticlesLocal");
1446
1447 CH_assert(m_isDefined);
1448
1449 unsigned long long n = 0;
1450
1451 for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
1452 const DisjointBoxLayout& dbl = m_grids[lvl];
1453 const DataIterator& dit = dbl.dataIterator();
1454
1455 const int nbox = dit.size();
1456
1457#pragma omp parallel for schedule(runtime) reduction(+ : n)
1458 for (int mybox = 0; mybox < nbox; mybox++) {
1459 const DataIndex& din = dit[mybox];
1460
1461 const List<P>& particles = (*m_particles[lvl])[din].listItems();
1462
1463 n += (unsigned long long)particles.length();
1464 }
1465 }
1466
1467 return n;
1468}
1469
template <class P>
unsigned long long
ParticleContainer<P>::getNumberOfValidParticlesGlobal() const
{
  CH_TIME("ParticleContainer::getNumberOfValidParticlesGlobal");

  CH_assert(m_isDefined);

  const unsigned long long n = this->getNumberOfValidParticlesLocal();

  return ParallelOps::sum(n);
}

template <class P>
unsigned long long
ParticleContainer<P>::getNumberOfOutcastParticlesLocal() const
{
  CH_TIME("ParticleContainer::getNumberOfOutcastParticlesLocal");

  CH_assert(m_isDefined);

  unsigned long long n = 0;

  for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
    // The outcast list lives on the ParticleData, i.e. per level rather than per patch,
    // so it is counted once per level.
    const List<P>& outcast = m_particles[lvl]->outcast();

    n += (unsigned long long)outcast.length();
  }

  return n;
}

template <class P>
unsigned long long
ParticleContainer<P>::getNumberOfOutcastParticlesGlobal() const
{
  CH_TIME("ParticleContainer::getNumberOfOutcastParticlesGlobal");

  CH_assert(m_isDefined);

  const unsigned long long n = this->getNumberOfOutcastParticlesLocal();

  return ParallelOps::sum(n);
}

template <class P>
unsigned long long
ParticleContainer<P>::getNumberOfMaskParticlesLocal() const
{
  CH_TIME("ParticleContainer::getNumberOfMaskParticlesLocal");

  CH_assert(m_isDefined);

  unsigned long long n = 0;

  for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
    const DisjointBoxLayout& dbl = m_grids[lvl];
    const DataIterator&      dit = dbl.dataIterator();

    const int nbox = dit.size();

#pragma omp parallel for schedule(runtime) reduction(+ : n)
    for (int mybox = 0; mybox < nbox; mybox++) {
      const DataIndex& din = dit[mybox];

      const List<P>& maskParticles = (*m_maskParticles[lvl])[din].listItems();

      n += (unsigned long long)maskParticles.length();
    }
  }

  return n;
}

template <class P>
unsigned long long
ParticleContainer<P>::getNumberOfMaskParticlesGlobal() const
{
  CH_TIME("ParticleContainer::getNumberOfMaskParticlesGlobal");

  CH_assert(m_isDefined);

  const unsigned long long n = this->getNumberOfMaskParticlesLocal();

  return ParallelOps::sum(n);
}

template <class P>
void
ParticleContainer<P>::copyMaskParticles(const Vector<RefCountedPtr<LevelData<BaseFab<bool>>>>& a_mask) const
{
  CH_TIME("ParticleContainer::copyMaskParticles(amr)");

  CH_assert(m_isDefined);

  for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
    if (!a_mask[lvl].isNull()) {
      this->copyMaskParticles(lvl, *a_mask[lvl]);
    }
  }
}

template <class P>
void
ParticleContainer<P>::copyMaskParticles(const int a_level, const LevelData<BaseFab<bool>>& a_mask) const
{
  CH_TIME("ParticleContainer::copyMaskParticles(level)");
  if (m_verbose) {
    pout() << "ParticleContainer::copyMaskParticles(level)" << endl;
  }

  CH_assert(m_isDefined);

  m_maskParticles[a_level]->clear();

  const RealVect dx = m_dx[a_level];

  // Copy particles from m_particles to m_maskParticles if they lie in the input mask.
  const DisjointBoxLayout& dbl = m_grids[a_level];
  const DataIterator&      dit = dbl.dataIterator();

  const int nbox = dit.size();

#pragma omp parallel for schedule(runtime)
  for (int mybox = 0; mybox < nbox; mybox++) {
    const DataIndex& din = dit[mybox];

    const BaseFab<bool>& mask = a_mask[din];

    if (mask.isUsable()) {
      const Box gridBox = m_grids[a_level][din];
      const Box maskBox = mask.box();

      List<P>&       maskParticles = (*m_maskParticles[a_level])[din].listItems();
      const List<P>& particles     = (*m_particles[a_level])[din].listItems();

      for (ListIterator<P> lit(particles); lit.ok(); ++lit) {
        const RealVect x  = lit().position();
        const IntVect  iv = ParticleOps::getParticleCellIndex(x, m_probLo, dx);

        if (!(maskBox.contains(iv))) {
          std::cout << a_level << "\t" << iv << "\t" << x << "\t" << maskBox << std::endl;

          MayDay::Error("ParticleContainer::copyMaskParticles -- logic bust. Particle has fallen off grid");
        }
        else if (mask(iv)) {
          maskParticles.add(lit());
        }
      }
    }
  }
}
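
/* Sketch of a caller-built mask (illustrative, names hypothetical): flag the
 * cells of interest on a level, then copy the particles living in flagged
 * cells into the mask data holder:
 *
 *   LevelData<BaseFab<bool>> mask(dbl, 1, IntVect::Zero);
 *
 *   for (DataIterator dit(dbl); dit.ok(); ++dit) {
 *     mask[dit()].setVal(false);
 *     // ... set mask[dit()] = true in, e.g., cells near the refinement boundary ...
 *   }
 *
 *   particles.copyMaskParticles(lvl, mask);
 *   // m_maskParticles on level lvl now holds copies of the flagged particles.
 */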

template <class P>
void
ParticleContainer<P>::transferMaskParticles(const Vector<RefCountedPtr<LevelData<BaseFab<bool>>>>& a_mask)
{
  CH_TIME("ParticleContainer::transferMaskParticles(amr)");

  CH_assert(m_isDefined);

  for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
    if (!a_mask[lvl].isNull()) {
      this->transferMaskParticles(lvl, *a_mask[lvl]);
    }
  }
}

template <class P>
void
ParticleContainer<P>::transferMaskParticles(const int a_level, const LevelData<BaseFab<bool>>& a_mask)
{
  CH_TIME("ParticleContainer::transferMaskParticles(level)");
  if (m_verbose) {
    pout() << "ParticleContainer::transferMaskParticles(level)" << endl;
  }

  CH_assert(m_isDefined);

  const RealVect dx = m_dx[a_level];

  // Transfer particles from m_particles to m_maskParticles if they lie in the input mask.
  const DisjointBoxLayout& dbl = m_grids[a_level];
  const DataIterator&      dit = dbl.dataIterator();

  const int nbox = dit.size();

#pragma omp parallel for schedule(runtime)
  for (int mybox = 0; mybox < nbox; mybox++) {
    const DataIndex& din = dit[mybox];

    const BaseFab<bool>& mask = a_mask[din];

    if (mask.isUsable()) {
      const Box gridBox = m_grids[a_level][din];
      const Box maskBox = mask.box();

      List<P>& maskParticles = (*m_maskParticles[a_level])[din].listItems();
      List<P>& particles     = (*m_particles[a_level])[din].listItems();

      for (ListIterator<P> lit(particles); lit.ok();) {
        const RealVect x  = lit().position();
        const IntVect  iv = ParticleOps::getParticleCellIndex(x, m_probLo, dx);

        if (!(maskBox.contains(iv))) {
          // std::cout << a_level << "\t" << iv << "\t" << x << std::endl;

          MayDay::Warning("ParticleContainer::transferMaskParticles -- logic bust. Particle has fallen off grid");

          ++lit;
        }
        else {
          if (mask(iv)) {
            maskParticles.transfer(lit);
          }
          else {
            ++lit;
          }
        }
      }
    }
  }
}

template <class P>
void
ParticleContainer<P>::clearParticles()
{
  CH_assert(m_isDefined);

  this->clear(m_particles);
}

template <class P>
void
ParticleContainer<P>::clearBufferParticles() const
{
  CH_assert(m_isDefined);

  this->clear(m_bufferParticles);
}

template <class P>
void
ParticleContainer<P>::clearMaskParticles() const
{
  CH_assert(m_isDefined);

  this->clear(m_maskParticles);
}

template <class P>
void
ParticleContainer<P>::clearOutcast() noexcept
{
  CH_assert(m_isDefined);

  for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
    ParticleData<P>& particleData = *m_particles[lvl];

    List<P>& outcast = particleData.outcast();

    outcast.clear();
  }
}

template <class P>
void
ParticleContainer<P>::clear(AMRParticles<P>& a_particles) const
{
  if (m_verbose) {
    pout() << "ParticleContainer::clear(AMRParticles)" << endl;
  }

  CH_assert(m_isDefined);

  for (int lvl = 0; lvl <= m_finestLevel; lvl++) {
    ParticleData<P>& levelParticles = *a_particles[lvl];

    // Clear patch particles
    const BoxLayout&    grids = levelParticles.getBoxes();
    const DataIterator& dit   = grids.dataIterator();

    const int nbox = dit.size();

#pragma omp parallel for schedule(runtime)
    for (int mybox = 0; mybox < nbox; mybox++) {
      const DataIndex din = dit[mybox];

      List<P>& patchParticles = levelParticles[din].listItems();

      patchParticles.clear();
    }

    // Clear outcast
    List<P>& outcast = levelParticles.outcast();
    outcast.clear();
  }
}

#include <CD_NamespaceFooter.H>

#endif