CpGridData.hpp
//===========================================================================
//
// File: CpGridData.hpp
//
// Created: Sep 17 21:11:41 2013
//
// Author(s): Atgeirr F Rasmussen <atgeirr@sintef.no>
//            Bård Skaflestad     <bard.skaflestad@sintef.no>
//            Markus Blatt        <markus@dr-blatt.de>
//
// Comment: Major parts of this file originated in dune/grid/CpGrid.hpp
//          and were transferred here during the refactoring for the
//          parallelization.
//
// $Date$
//
// $Revision$
//
//===========================================================================
19
20/*
21 Copyright 2009, 2010 SINTEF ICT, Applied Mathematics.
22 Copyright 2009, 2010, 2013, 2022 Equinor ASA.
23 Copyright 2013 Dr. Blatt - HPC-Simulation-Software & Services
24
25 This file is part of The Open Porous Media project (OPM).
26
27 OPM is free software: you can redistribute it and/or modify
28 it under the terms of the GNU General Public License as published by
29 the Free Software Foundation, either version 3 of the License, or
30 (at your option) any later version.
31
32 OPM is distributed in the hope that it will be useful,
33 but WITHOUT ANY WARRANTY; without even the implied warranty of
34 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
35 GNU General Public License for more details.
36
37 You should have received a copy of the GNU General Public License
38 along with OPM. If not, see <http://www.gnu.org/licenses/>.
39*/
#ifndef OPM_CPGRIDDATA_HEADER
#define OPM_CPGRIDDATA_HEADER

// Warning suppression for Dune includes.
#include <opm/grid/utility/platform_dependent/disable_warnings.h>

#include <dune/common/version.hh>
#include <dune/common/parallel/mpihelper.hh>
#ifdef HAVE_DUNE_ISTL
#include <dune/istl/owneroverlapcopy.hh>
#endif

#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
#include <dune/common/parallel/communication.hh>
#else
#include <dune/common/parallel/collectivecommunication.hh>
#endif
#include <dune/common/parallel/indexset.hh>
#include <dune/common/parallel/interface.hh>
#include <dune/common/parallel/plocalindex.hh>
#if DUNE_VERSION_NEWER(DUNE_GRID, 2, 7)
#include <dune/common/parallel/variablesizecommunicator.hh>
#else
#include <opm/grid/utility/VariableSizeCommunicator.hpp>
#endif
#include <dune/grid/common/gridenums.hh>

#include <opm/grid/utility/platform_dependent/reenable_warnings.h>

#if HAVE_ECL_INPUT
#include <opm/input/eclipse/EclipseState/Grid/NNC.hpp>
#endif

#include <array>
#include <tuple>
#include <algorithm>
#include <set>

#include "OrientedEntityTable.hpp"
#include "DefaultGeometryPolicy.hpp"

#include <opm/grid/utility/OpmParserIncludes.hpp>

#include "Entity2IndexDataHandle.hpp"
#include "DataHandleWrappers.hpp"
#include "GlobalIdMapping.hpp"
namespace Opm
{
class EclipseState;
}
namespace Dune
{
class CpGrid;

namespace cpgrid
{

class IndexSet;
class IdSet;
class LevelGlobalIdSet;
class PartitionTypeIndicator;
template<int,int> class Geometry;
template<int> class Entity;
template<int> class EntityRep;

namespace mover
{
template<class T, int i> struct Mover;
}
/// Struct that holds all the data needed to represent a CpGrid.
class CpGridData
{
    template<class T, int i> friend struct mover::Mover;

    friend class GlobalIdSet;

private:
    CpGridData(const CpGridData& g);

public:
    enum{
#ifndef MAX_DATA_COMMUNICATED_PER_ENTITY
        /// The maximum data items allowed per cell (DUNE < 2.5.2);
        /// the numeric default is elided in this listing.
        MAX_DATA_PER_CELL
#else
        MAX_DATA_PER_CELL = MAX_DATA_COMMUNICATED_PER_ENTITY
#endif
    };

    /// Constructor taking a communicator.
    explicit CpGridData(MPIHelper::MPICommunicator comm);

    /// Constructor.
    CpGridData();
    /// Destructor.
    ~CpGridData();
    /// number of leaf entities per codim in this process
    int size(int codim) const;

    /// number of leaf entities per geometry type in this process
    int size (GeometryType type) const
    {
        if (type.isCube()) {
            return size(3 - type.dim());
        } else {
            return 0;
        }
    }
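    // (A cube of dimension 3 is a cell, codim 0, and a cube of dimension 0
    //  is a point, codim 3; non-cube geometry types cannot occur in a
    //  corner-point grid, hence the count of 0.)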
    /// Read the Sintef legacy grid format ('topogeom').
    void readSintefLegacyFormat(const std::string& grid_prefix);

    /// Write the Sintef legacy grid format ('topogeom').
    void writeSintefLegacyFormat(const std::string& grid_prefix) const;

    /// Read the Eclipse grid format ('grdecl').
    void readEclipseFormat(const std::string& filename, bool periodic_extension, bool turn_normals = false);

#if HAVE_ECL_INPUT
    void processEclipseFormat(const Opm::Deck& deck, bool periodic_extension, bool turn_normals = false, bool clip_z = false,
                              const std::vector<double>& poreVolume = std::vector<double>());

    std::vector<std::size_t> processEclipseFormat(const Opm::EclipseGrid* ecl_grid, Opm::EclipseState* ecl_state,
                                                  bool periodic_extension, bool turn_normals = false, bool clip_z = false, bool pinchActive = true);
#endif

    /// Read the Eclipse grid format ('grdecl').
    void processEclipseFormat(const grdecl& input_data, Opm::EclipseState* ecl_state,
                              std::array<std::set<std::pair<int, int>>, 2>& nnc,
                              bool remove_ij_boundary, bool turn_normals, bool pinchActive);

    /// Extract Cartesian index triplet (i,j,k) of an active cell.
    void getIJK(int c, std::array<int,3>& ijk) const
    {
        int gc = global_cell_[c];
        ijk[0] = gc % logical_cartesian_size_[0]; gc /= logical_cartesian_size_[0];
        ijk[1] = gc % logical_cartesian_size_[1];
        ijk[2] = gc / logical_cartesian_size_[1];
    }
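    // getIJK() inverts the usual Cartesian linearization
    // gc = i + nx*(j + ny*k). A minimal sketch of that forward mapping,
    // assuming the same ordering as logical_cartesian_size_ (illustrative
    // helper, not part of this class):
    //
    //   int cartesianIndex(const std::array<int,3>& ijk,
    //                      const std::array<int,3>& dims)
    //   {
    //       return ijk[0] + dims[0] * (ijk[1] + dims[1] * ijk[2]);
    //   }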

    // Make unique boundary ids for all intersections.
    void computeUniqueBoundaryIds();

    /// Is the grid currently using unique boundary ids?
    bool uniqueBoundaryIds() const
    {
        return use_unique_boundary_ids_;
    }

    /// Set whether we want to have unique boundary ids.
    void setUniqueBoundaryIds(bool uids)
    {
        use_unique_boundary_ids_ = uids;
        if (use_unique_boundary_ids_ && unique_boundary_ids_.empty()) {
            computeUniqueBoundaryIds();
        }
    }

    /// Return the internalized zcorn copy from the grid processing;
    /// if no cells were adjusted during the processing this can be empty.
    const std::vector<double>& zcornData() const {
        return zcorn;
    }

    /// Get the index set.
    const IndexSet& indexSet() const
    {
        return *index_set_;
    }

    /// The logical cartesian size of the grid.
    const std::array<int, 3>& logicalCartesianSize() const
    {
        return logical_cartesian_size_;
    }

    /// Redistribute a global grid.
    void distributeGlobalGrid(CpGrid& grid,
                              const CpGridData& view_data,
                              const std::vector<int>& cell_part);

    /// communicate objects for all codims on a given level
    template<class DataHandle>
    void communicate(DataHandle& data, InterfaceType iftype, CommunicationDirection dir);

#if HAVE_MPI
#if DUNE_VERSION_NEWER(DUNE_GRID, 2, 7)
    using Communicator = VariableSizeCommunicator<>;
#else
    using Communicator = Opm::VariableSizeCommunicator<>;
#endif

    using InterfaceMap = Communicator::InterfaceMap;

    using CommunicationType = Dune::OwnerOverlapCopyCommunication<int,int>;

    using ParallelIndexSet = CommunicationType::ParallelIndexSet;

    using RemoteIndices = Dune::RemoteIndices<ParallelIndexSet>;

    CommunicationType& cellCommunication()
    {
        return cell_comm_;
    }

    const CommunicationType& cellCommunication() const
    {
        return cell_comm_;
    }

    ParallelIndexSet& cellIndexSet()
    {
        return cellCommunication().indexSet();
    }

    const ParallelIndexSet& cellIndexSet() const
    {
        return cellCommunication().indexSet();
    }

    RemoteIndices& cellRemoteIndices()
    {
        return cellCommunication().remoteIndices();
    }

    const RemoteIndices& cellRemoteIndices() const
    {
        return cellCommunication().remoteIndices();
    }
#endif
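    // cell_comm_ above follows the dune-istl owner/overlap/copy model:
    // each cell is flagged as owner on exactly one rank and as overlap or
    // copy on the ranks where it is a ghost. A minimal sketch of inspecting
    // it in an MPI build with dune-istl ('grid_data' stands for a
    // CpGridData instance and is an illustrative name):
    //
    //   const auto& ixset = grid_data.cellIndexSet();
    //   for (const auto& idx : ixset) {
    //       if (idx.local().attribute() == Dune::OwnerOverlapCopyAttributeSet::owner) {
    //           // this rank owns the cell with local index idx.local()
    //       }
    //   }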

#ifdef HAVE_DUNE_ISTL
    /// The type of the set of the attributes.
    typedef Dune::OwnerOverlapCopyAttributeSet::AttributeSet AttributeSet;
#else
    /// The type of the set of the attributes.
    enum AttributeSet{owner, overlap, copy};
#endif

    /// Get sorted active cell indices of numerical aquifer.
    const std::vector<int>& sortedNumAquiferCells() const
    {
        return aquifer_cells_;
    }

private:

    void populateGlobalCellIndexSet();

#if HAVE_MPI

    template<class DataHandle>
    void gatherData(DataHandle& data, CpGridData* global_view,
                    CpGridData* distributed_view);

    template<int codim, class DataHandle>
    void gatherCodimData(DataHandle& data, CpGridData* global_data,
                         CpGridData* distributed_data);

    template<class DataHandle>
    void scatterData(DataHandle& data, CpGridData* global_data,
                     CpGridData* distributed_data, const InterfaceMap& cell_inf,
                     const InterfaceMap& point_inf);

    template<int codim, class DataHandle>
    void scatterCodimData(DataHandle& data, CpGridData* global_data,
                          CpGridData* distributed_data);

    template<int codim, class DataHandle>
    void communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data, CommunicationDirection dir,
                          const Interface& interface);

    template<int codim, class DataHandle>
    void communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data, CommunicationDirection dir,
                          const InterfaceMap& interface);

#endif

    void computeGeometry(CpGrid& grid,
                         const DefaultGeometryPolicy& globalGeometry,
                         const std::vector<int>& globalAquiferCells,
                         const OrientedEntityTable<0, 1>& globalCell2Faces,
                         DefaultGeometryPolicy& geometry,
                         std::vector<int>& aquiferCells,
                         const OrientedEntityTable<0, 1>& cell2Faces,
                         const std::vector< std::array<int,8> >& cell2Points);

    // Representing the topology
    OrientedEntityTable<0, 1> cell_to_face_;
    Opm::SparseTable<int> face_to_point_;
    std::vector< std::array<int,8> > cell_to_point_;
    std::array<int, 3> logical_cartesian_size_;
    std::vector<int> global_cell_;
    cpgrid::EntityVariable<enum face_tag, 1> face_tag_; // {LEFT, BACK, TOP}
    DefaultGeometryPolicy geometry_;
    typedef FieldVector<double, 3> PointType;
    cpgrid::EntityVariable<int, 1> unique_boundary_ids_;
    cpgrid::IndexSet* index_set_;
    const cpgrid::IdSet* local_id_set_;
    LevelGlobalIdSet* global_id_set_;
    PartitionTypeIndicator* partition_type_indicator_;

    typedef MPIHelper::MPICommunicator MPICommunicator;
#if DUNE_VERSION_NEWER(DUNE_GRID, 2, 7)
    using Communication = Dune::Communication<MPICommunicator>;
#else
    using CollectiveCommunication = Dune::CollectiveCommunication<MPICommunicator>;
    using Communication = Dune::CollectiveCommunication<MPICommunicator>;
#endif
    Communication ccobj_;

    // Boundary information (optional).
    bool use_unique_boundary_ids_;

    std::vector<double> zcorn;

    std::vector<int> aquifer_cells_;

#if HAVE_MPI

    CommunicationType cell_comm_;

    std::tuple<Interface,Interface,Interface,Interface,Interface> cell_interfaces_;
    /*
    // code deactivated, because users cannot access face indices and therefore
    // communication on faces makes no sense!
    std::tuple<InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap>
    face_interfaces_;
    */
    std::tuple<InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap>
    point_interfaces_;

#endif

    // Return the geometry vector corresponding to the given codim.
    template <int codim>
    const EntityVariable<Geometry<3 - codim, 3>, codim>& geomVector() const
    {
        return geometry_.geomVector<codim>();
    }

    friend class Dune::CpGrid;
    template<int> friend class Entity;
    template<int> friend class EntityRep;
    friend class Intersection;
    friend class PartitionTypeIndicator;
};

#if HAVE_MPI

namespace
{
template<class T>
T& getInterface(InterfaceType iftype,
                std::tuple<T,T,T,T,T>& interfaces)
{
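    // The cases below mirror the numeric values of Dune::InterfaceType from
    // dune-grid's gridenums.hh: InteriorBorder_InteriorBorder_Interface=0,
    // InteriorBorder_All_Interface=1, Overlap_OverlapFront_Interface=2,
    // Overlap_All_Interface=3, All_All_Interface=4.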
    switch(iftype)
    {
    case 0:
        return std::get<0>(interfaces);
    case 1:
        return std::get<1>(interfaces);
    case 2:
        return std::get<2>(interfaces);
    case 3:
        return std::get<3>(interfaces);
    case 4:
        return std::get<4>(interfaces);
    }
    OPM_THROW(std::runtime_error, "Invalid Interface type was used during communication");
}

} // end unnamed namespace

template<int codim, class DataHandle>
void CpGridData::communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data, CommunicationDirection dir,
                                  const Interface& interface)
{
    this->template communicateCodim<codim>(data, dir, interface.interfaces());
}

template<int codim, class DataHandle>
void CpGridData::communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data_wrapper, CommunicationDirection dir,
                                  const InterfaceMap& interface)
{
    Communicator comm(ccobj_, interface);

    if(dir==ForwardCommunication)
        comm.forward(data_wrapper);
    else
        comm.backward(data_wrapper);
}
#endif

template<class DataHandle>
void CpGridData::communicate(DataHandle& data, InterfaceType iftype,
                             CommunicationDirection dir)
{
#if HAVE_MPI
    if(data.contains(3,0))
    {
        Entity2IndexDataHandle<DataHandle, 0> data_wrapper(*this, data);
        communicateCodim<0>(data_wrapper, dir, getInterface(iftype, cell_interfaces_));
    }
    if(data.contains(3,3))
    {
        Entity2IndexDataHandle<DataHandle, 3> data_wrapper(*this, data);
        communicateCodim<3>(data_wrapper, dir, getInterface(iftype, point_interfaces_));
    }
#else
    // Suppress warnings for unused arguments.
    (void) data;
    (void) iftype;
    (void) dir;
#endif
}
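
// communicate() accepts any data handle modelling the dune-grid
// CommDataHandleIF interface used above: contains(), fixedSize() (spelled
// fixedsize() before DUNE 2.7), size(), gather() and scatter(). A minimal
// sketch of such a handle exchanging one double per cell; the handle type
// and the e.index() accessor are illustrative assumptions, not part of
// this header:
//
//   struct CellDataHandle
//   {
//       using DataType = double;
//       std::vector<double>& values; // one entry per cell
//
//       bool contains(int dim, int codim) const
//       { return dim == 3 && codim == 0; }
//       bool fixedSize(int, int) const
//       { return true; }
//       template<class Entity>
//       std::size_t size(const Entity&) const
//       { return 1; }
//       template<class Buffer, class Entity>
//       void gather(Buffer& buf, const Entity& e) const
//       { buf.write(values[e.index()]); }
//       template<class Buffer, class Entity>
//       void scatter(Buffer& buf, const Entity& e, std::size_t)
//       { buf.read(values[e.index()]); }
//   };
//
//   // CellDataHandle handle{values};
//   // grid_data.communicate(handle, Dune::InteriorBorder_All_Interface,
//   //                       Dune::ForwardCommunication);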
} // end namespace cpgrid
} // end namespace Dune

#if HAVE_MPI
#include <opm/grid/cpgrid/Entity.hpp>
#include <opm/grid/cpgrid/Indexsets.hpp>

namespace Dune {
namespace cpgrid {

namespace mover
{
template<class T>
class MoveBuffer
{
    friend class Dune::cpgrid::CpGridData;
public:
    void read(T& data)
    {
        data=buffer_[index_++];
    }
    void write(const T& data)
    {
        buffer_[index_++]=data;
    }
    void reset()
    {
        index_=0;
    }
    void resize(std::size_t size)
    {
        buffer_.resize(size);
        index_=0;
    }
private:
    std::vector<T> buffer_;
    typename std::vector<T>::size_type index_;
};
template<class DataHandle,int codim>
struct Mover
{
};

template<class DataHandle>
struct BaseMover
{
    BaseMover(DataHandle& data)
    : data_(data)
    {}
    template<class E>
    void moveData(const E& from, const E& to)
    {
        std::size_t size=data_.size(from);
        buffer.resize(size);
        data_.gather(buffer, from);
        buffer.reset();
        data_.scatter(buffer, to, size);
    }
    DataHandle& data_;
    MoveBuffer<typename DataHandle::DataType> buffer;
};
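// moveData() above cycles a single MoveBuffer through gather/reset/scatter:
// gather() fills the buffer via write(), which advances index_, reset()
// rewinds index_ to zero, and scatter() drains the same items via read().
// The buffer must therefore be resized to data_.size(from) before gathering.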

template<class DataHandle>
struct Mover<DataHandle,0> : public BaseMover<DataHandle>
{
    Mover<DataHandle,0>(DataHandle& data, CpGridData* gatherView,
                        CpGridData* scatterView)
    : BaseMover<DataHandle>(data), gatherView_(gatherView), scatterView_(scatterView)
    {}

    void operator()(std::size_t from_cell_index,std::size_t to_cell_index)
    {
        Entity<0> from_entity=Entity<0>(*gatherView_, from_cell_index, true);
        Entity<0> to_entity=Entity<0>(*scatterView_, to_cell_index, true);
        this->moveData(from_entity, to_entity);
    }
    CpGridData* gatherView_;
    CpGridData* scatterView_;
};

template<class DataHandle>
struct Mover<DataHandle,1> : public BaseMover<DataHandle>
{
    Mover<DataHandle,1>(DataHandle& data, CpGridData* gatherView,
                        CpGridData* scatterView)
    : BaseMover<DataHandle>(data), gatherView_(gatherView), scatterView_(scatterView)
    {}

    void operator()(std::size_t from_cell_index,std::size_t to_cell_index)
    {
        typedef typename OrientedEntityTable<0,1>::row_type row_type;
        EntityRep<0> from_cell=EntityRep<0>(from_cell_index, true);
        EntityRep<0> to_cell=EntityRep<0>(to_cell_index, true);
        OrientedEntityTable<0,1>& table = gatherView_->cell_to_face_;
        row_type from_faces=table.operator[](from_cell);
        row_type to_faces=scatterView_->cell_to_face_[to_cell];

        for(int i=0; i<from_faces.size(); ++i)
            this->moveData(from_faces[i], to_faces[i]);
    }
    CpGridData *gatherView_;
    CpGridData *scatterView_;
};

template<class DataHandle>
struct Mover<DataHandle,3> : public BaseMover<DataHandle>
{
    Mover<DataHandle,3>(DataHandle& data, CpGridData* gatherView,
                        CpGridData* scatterView)
    : BaseMover<DataHandle>(data), gatherView_(gatherView), scatterView_(scatterView)
    {}
    void operator()(std::size_t from_cell_index,std::size_t to_cell_index)
    {
        const std::array<int,8>& from_cell_points=
            gatherView_->cell_to_point_[from_cell_index];
        const std::array<int,8>& to_cell_points=
            scatterView_->cell_to_point_[to_cell_index];
        for(std::size_t i=0; i<8; ++i)
        {
            this->moveData(Entity<3>(*gatherView_, from_cell_points[i], true),
                           Entity<3>(*scatterView_, to_cell_points[i], true));
        }
    }
    CpGridData* gatherView_;
    CpGridData* scatterView_;
};

} // end mover namespace

template<class DataHandle>
void CpGridData::scatterData(DataHandle& data, CpGridData* global_data,
                             CpGridData* distributed_data, const InterfaceMap& cell_inf,
                             const InterfaceMap& point_inf)
{
#if HAVE_MPI
    if(data.contains(3,0))
    {
        Entity2IndexDataHandle<DataHandle, 0> data_wrapper(*global_data, *distributed_data, data);
        communicateCodim<0>(data_wrapper, ForwardCommunication, cell_inf);
    }
    if(data.contains(3,3))
    {
        Entity2IndexDataHandle<DataHandle, 3> data_wrapper(*global_data, *distributed_data, data);
        communicateCodim<3>(data_wrapper, ForwardCommunication, point_inf);
    }
#endif
}

template<int codim, class DataHandle>
void CpGridData::scatterCodimData(DataHandle& data, CpGridData* global_data,
                                  CpGridData* distributed_data)
{
    CpGridData *gather_view, *scatter_view;
    gather_view=global_data;
    scatter_view=distributed_data;

    mover::Mover<DataHandle,codim> mover(data, gather_view, scatter_view);

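    // Each entry of the distributed cell index set pairs a cell's global
    // index (its position in the undistributed view) with its local index
    // in the distributed view; the mover copies the data entity by entity.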
    for(auto index=distributed_data->cellIndexSet().begin(),
            end = distributed_data->cellIndexSet().end();
        index!=end; ++index)
    {
        std::size_t from=index->global();
        std::size_t to=index->local();
        mover(from,to);
    }
}

namespace
{

template<int codim, class T, class F>
void visitInterior(CpGridData& distributed_data, T begin, T endit, F& func)
{
    for(T it=begin; it!=endit; ++it)
    {
        Entity<codim> entity(distributed_data, it-begin, true);
        PartitionType pt = entity.partitionType();
        if(pt==Dune::InteriorEntity)
        {
            func(*it, entity);
        }
    }
}

template<class DataHandle>
struct GlobalIndexSizeGatherer
{
    GlobalIndexSizeGatherer(DataHandle& data_,
                            std::vector<int>& ownedGlobalIndices_,
                            std::vector<int>& ownedSizes_)
    : data(data_), ownedGlobalIndices(ownedGlobalIndices_), ownedSizes(ownedSizes_)
    {}

    template<class T, class E>
    void operator()(T& i, E& entity)
    {
        ownedGlobalIndices.push_back(i);
        ownedSizes.push_back(data.size(entity));
    }
    DataHandle& data;
    std::vector<int>& ownedGlobalIndices;
    std::vector<int>& ownedSizes;
};

template<class DataHandle>
struct DataGatherer
{
    DataGatherer(mover::MoveBuffer<typename DataHandle::DataType>& buffer_,
                 DataHandle& data_)
    : buffer(buffer_), data(data_)
    {}

    template<class T, class E>
    void operator()(T& /* it */, E& entity)
    {
        data.gather(buffer, entity);
    }
    mover::MoveBuffer<typename DataHandle::DataType>& buffer;
    DataHandle& data;
};

} // end unnamed namespace

template<class DataHandle>
void CpGridData::gatherData(DataHandle& data, CpGridData* global_data,
                            CpGridData* distributed_data)
{
#if HAVE_MPI
    if(data.contains(3,0))
        gatherCodimData<0>(data, global_data, distributed_data);
    if(data.contains(3,3))
        gatherCodimData<3>(data, global_data, distributed_data);
#endif
}

template<int codim, class DataHandle>
void CpGridData::gatherCodimData(DataHandle& data, CpGridData* global_data,
                                 CpGridData* distributed_data)
{
#if HAVE_MPI
    // Get the mapping to global index from the global id set
    const std::vector<int>& mapping =
        distributed_data->global_id_set_->getMapping<codim>();

    // Get the global indices and data size for the entities whose data is
    // to be sent, i.e. the ones that we own.
    std::vector<int> owned_global_indices;
    std::vector<int> owned_sizes;
    owned_global_indices.reserve(mapping.size());
    owned_sizes.reserve(mapping.size());

    GlobalIndexSizeGatherer<DataHandle> gisg(data, owned_global_indices, owned_sizes);
    visitInterior<codim>(*distributed_data, mapping.begin(), mapping.end(), gisg);

    // communicate the number of indices that each processor sends
    int no_indices=owned_sizes.size();
    // We will take the address of the first element for MPI_Allgather below.
    // Make sure the containers have such an element.
    if ( owned_global_indices.empty() )
        owned_global_indices.resize(1);
    if ( owned_sizes.empty() )
        owned_sizes.resize(1);
    std::vector<int> no_indices_to_recv(distributed_data->ccobj_.size());
    distributed_data->ccobj_.allgather(&no_indices, 1, &(no_indices_to_recv[0]));
    // Compute the size of the vector capable of receiving all indices,
    // and allgather the global indices and the sizes.
    // Calculate displacements first.
    std::vector<int> displ(distributed_data->ccobj_.size()+1, 0);
    std::transform(displ.begin(), displ.end()-1, no_indices_to_recv.begin(), displ.begin()+1,
                   std::plus<int>());
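    // (The transform above computes an exclusive prefix sum in place:
    //  displ[p] is the offset at which rank p's entries start, and the
    //  last element is the total number of entries across all ranks.)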
    int global_size=displ[displ.size()-1];//+no_indices_to_recv[displ.size()-1];
    std::vector<int> global_indices(global_size);
    std::vector<int> global_sizes(global_size);
    MPI_Allgatherv(&(owned_global_indices[0]), no_indices, MPITraits<int>::getType(),
                   &(global_indices[0]), &(no_indices_to_recv[0]), &(displ[0]),
                   MPITraits<int>::getType(),
                   distributed_data->ccobj_);
    MPI_Allgatherv(&(owned_sizes[0]), no_indices, MPITraits<int>::getType(),
                   &(global_sizes[0]), &(no_indices_to_recv[0]), &(displ[0]),
                   MPITraits<int>::getType(),
                   distributed_data->ccobj_);
    std::vector<int>().swap(owned_global_indices); // free data for reuse.
    // Compute the number of data items to send
    std::vector<int> no_data_send(distributed_data->ccobj_.size());
    for(typename std::vector<int>::iterator begin=no_data_send.begin(),
            i=begin, end=no_data_send.end(); i!=end; ++i)
        *i = std::accumulate(global_sizes.begin()+displ[i-begin],
                             global_sizes.begin()+displ[i-begin+1], std::size_t());
    // free at least some memory that can be reused.
    std::vector<int>().swap(owned_sizes);
    // compute the displacements for receiving with allgatherv
    displ[0]=0;
    std::transform(displ.begin(), displ.end()-1, no_data_send.begin(), displ.begin()+1,
                   std::plus<std::size_t>());
    // Compute the number of data items we will receive
    int no_data_recv = displ[displ.size()-1];//+global_sizes[displ.size()-1];

    // Collect the data to send, gather it
    mover::MoveBuffer<typename DataHandle::DataType> local_data_buffer, global_data_buffer;
    if ( no_data_send[distributed_data->ccobj_.rank()] )
    {
        local_data_buffer.resize(no_data_send[distributed_data->ccobj_.rank()]);
    }
    else
    {
        local_data_buffer.resize(1);
    }
    global_data_buffer.resize(no_data_recv);

    DataGatherer<DataHandle> gatherer(local_data_buffer, data);
    visitInterior<codim>(*distributed_data, mapping.begin(), mapping.end(), gatherer);
    MPI_Allgatherv(&(local_data_buffer.buffer_[0]), no_data_send[distributed_data->ccobj_.rank()],
                   MPITraits<typename DataHandle::DataType>::getType(),
                   &(global_data_buffer.buffer_[0]), &(no_data_send[0]), &(displ[0]),
                   MPITraits<typename DataHandle::DataType>::getType(),
                   distributed_data->ccobj_);
    Entity2IndexDataHandle<DataHandle, codim> edata(*global_data, data);
    int offset=0;
    for(int i=0; i< codim; ++i)
        offset+=global_data->size(i);
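    // (Global ids are assumed to be numbered consecutively across codims,
    //  so the ids of this codim start after all entities of lower codims;
    //  subtracting the offset yields a zero-based index for scatter below.)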

    typename std::vector<int>::const_iterator s=global_sizes.begin();
    for(typename std::vector<int>::const_iterator i=global_indices.begin(),
            end=global_indices.end();
        i!=end; ++s, ++i)
    {
        edata.scatter(global_data_buffer, *i-offset, *s);
    }
#endif
}

} // end namespace cpgrid
} // end namespace Dune

#endif // HAVE_MPI

#endif // OPM_CPGRIDDATA_HEADER