globalindices.hh
1 // -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 // vi: set et ts=4 sw=4 sts=4:
3 /*
4  This file is part of the Open Porous Media project (OPM).
5 
6  OPM is free software: you can redistribute it and/or modify
7  it under the terms of the GNU General Public License as published by
8  the Free Software Foundation, either version 2 of the License, or
9  (at your option) any later version.
10 
11  OPM is distributed in the hope that it will be useful,
12  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  GNU General Public License for more details.
15 
16  You should have received a copy of the GNU General Public License
17  along with OPM. If not, see <http://www.gnu.org/licenses/>.
18 
19  Consult the COPYING file in the top-level source directory of this
20  module for the precise wording of the license and the list of
21  copyright holders.
22 */
27 #ifndef EWOMS_GLOBAL_INDICES_HH
28 #define EWOMS_GLOBAL_INDICES_HH
29 
30 #include <dune/grid/common/datahandleif.hh>
31 #include <dune/istl/bcrsmatrix.hh>
32 #include <dune/istl/scalarproducts.hh>
33 #include <dune/istl/operators.hh>
34 
35 #include <algorithm>
36 #include <set>
37 #include <map>
38 #include <iostream>
39 #include <tuple>
40 
41 #if HAVE_MPI
42 #include <mpi.h>
43 #endif
44 
45 #include "overlaptypes.hh"
46 
47 namespace Ewoms {
48 namespace Linear {
54 template <class ForeignOverlap>
55 class GlobalIndices
56 {
57  GlobalIndices(const GlobalIndices& ) = delete;
58 
59  typedef std::map<Index, Index> GlobalToDomesticMap;
60  typedef std::map<Index, Index> DomesticToGlobalMap;
61 
62 public:
63  GlobalIndices(const ForeignOverlap& foreignOverlap)
64  : foreignOverlap_(foreignOverlap)
65  {
66  myRank_ = 0;
67  mpiSize_ = 1;
68 
69 #if HAVE_MPI
70  {
71  int tmp;
72  MPI_Comm_rank(MPI_COMM_WORLD, &tmp);
73  myRank_ = static_cast<ProcessRank>(tmp);
74  MPI_Comm_size(MPI_COMM_WORLD, &tmp);
75  mpiSize_ = static_cast<size_t>(tmp);
76  }
77 #endif
78 
79  // calculate the domestic overlap (i.e. all overlap indices on
80  // foreign processes with which the current process overlaps).
81  // This requires communication via MPI.
82  buildGlobalIndices_();
83  }
84 
88  Index domesticToGlobal(Index domesticIdx) const
89  {
90  assert(domesticToGlobal_.find(domesticIdx) != domesticToGlobal_.end());
91 
92  return domesticToGlobal_.find(domesticIdx)->second;
93  }
94 
98  Index globalToDomestic(Index globalIdx) const
99  {
100  const auto& tmp = globalToDomestic_.find(globalIdx);
101 
102  if (tmp == globalToDomestic_.end())
103  return -1;
104 
105  return tmp->second;
106  }
107 
112  size_t numLocal() const
113  { return foreignOverlap_.numLocal(); }
114 
121  size_t numDomestic() const
122  { return numDomestic_; }
123 
127  void addIndex(Index domesticIdx, Index globalIdx)
128  {
129  domesticToGlobal_[domesticIdx] = globalIdx;
130  globalToDomestic_[globalIdx] = domesticIdx;
131  numDomestic_ = domesticToGlobal_.size();
132 
133  assert(domesticToGlobal_.size() == globalToDomestic_.size());
134  }
135 
139  void sendBorderIndex(ProcessRank peerRank, Index domesticIdx, Index peerLocalIdx)
140  {
141 #if HAVE_MPI
142  PeerIndexGlobalIndex sendBuf;
143  sendBuf.peerIdx = peerLocalIdx;
144  sendBuf.globalIdx = domesticToGlobal(domesticIdx);
145  MPI_Send(&sendBuf, // buff
146  sizeof(PeerIndexGlobalIndex), // count
147  MPI_BYTE, // data type
148  static_cast<int>(peerRank), // peer process
149  0, // tag
150  MPI_COMM_WORLD); // communicator
151 #endif
152  }
153 
158  void receiveBorderIndex(ProcessRank peerRank)
159  {
160 #if HAVE_MPI
161  PeerIndexGlobalIndex recvBuf;
162  MPI_Recv(&recvBuf, // buff
163  sizeof(PeerIndexGlobalIndex), // count
164  MPI_BYTE, // data type
165  static_cast<int>(peerRank), // peer process
166  0, // tag
167  MPI_COMM_WORLD, // communicator
168  MPI_STATUS_IGNORE); // status
169 
170  Index domesticIdx = foreignOverlap_.nativeToLocal(recvBuf.peerIdx);
171  if (domesticIdx >= 0) {
172  Index globalIdx = recvBuf.globalIdx;
173  addIndex(domesticIdx, globalIdx);
174  }
175 #endif // HAVE_MPI
176  }
177 
181  bool hasGlobalIndex(Index globalIdx) const
182  { return globalToDomestic_.find(globalIdx) != globalToDomestic_.end(); }
183 
188  void print() const
189  {
190  std::cout << "(domestic index, global index, domestic->global->domestic)"
191  << " list for rank " << myRank_ << "\n";
192 
193  for (size_t domIdx = 0; domIdx < domesticToGlobal_.size(); ++domIdx)
194  std::cout << "(" << domIdx << ", " << domesticToGlobal(domIdx)
195  << ", " << globalToDomestic(domesticToGlobal(domIdx)) << ") ";
196  std::cout << "\n" << std::flush;
197  }
198 
199 protected:
200  // retrieve the offset for the indices where we are master in the
201  // global index list
202  void buildGlobalIndices_()
203  {
204 #if HAVE_MPI
205  numDomestic_ = 0;
206 #else
207  numDomestic_ = foreignOverlap_.numLocal();
208 #endif
209 
210 #if HAVE_MPI
211  if (myRank_ == 0) {
212  // the first rank starts at index zero
213  domesticOffset_ = 0;
214  }
215  else {
216  // all other ranks retrieve their offset from the next
217  // lower rank
218  MPI_Recv(&domesticOffset_, // buffer
219  1, // count
220  MPI_INT, // data type
221  static_cast<int>(myRank_ - 1), // peer rank
222  0, // tag
223  MPI_COMM_WORLD, // communicator
224  MPI_STATUS_IGNORE);
225  }
226 
227  // create maps for all indices for which the current process
228  // is the master
229  int numMaster = 0;
230  for (unsigned i = 0; i < foreignOverlap_.numLocal(); ++i) {
231  if (!foreignOverlap_.iAmMasterOf(static_cast<Index>(i)))
232  continue;
233 
234  addIndex(static_cast<Index>(i),
235  static_cast<Index>(domesticOffset_ + numMaster));
236  ++numMaster;
237  }
238 
239  if (myRank_ < mpiSize_ - 1) {
240  // send the domestic offset plus the number of master
241  // indices to the process which is one rank higher
242  int tmp = domesticOffset_ + numMaster;
243  MPI_Send(&tmp, // buff
244  1, // count
245  MPI_INT, // data type
246  static_cast<int>(myRank_ + 1), // peer rank
247  0, // tag
248  MPI_COMM_WORLD); // communicator
249  }
250 
251  typename PeerSet::const_iterator peerIt;
252  typename PeerSet::const_iterator peerEndIt = peerSet_().end();
253  // receive the border indices from the lower ranks
254  peerIt = peerSet_().begin();
255  for (; peerIt != peerEndIt; ++peerIt) {
256  if (*peerIt < myRank_)
257  receiveBorderFrom_(*peerIt);
258  }
259 
260  // send the border indices to the higher ranks
261  peerIt = peerSet_().begin();
262  for (; peerIt != peerEndIt; ++peerIt) {
263  if (*peerIt > myRank_)
264  sendBorderTo_(*peerIt);
265  }
266 
267  // receive the border indices from the higher ranks
268  peerIt = peerSet_().begin();
269  for (; peerIt != peerEndIt; ++peerIt) {
270  if (*peerIt > myRank_)
271  receiveBorderFrom_(*peerIt);
272  }
273 
274  // send the border indices to the lower ranks
275  peerIt = peerSet_().begin();
276  for (; peerIt != peerEndIt; ++peerIt) {
277  if (*peerIt < myRank_)
278  sendBorderTo_(*peerIt);
279  }
280 #endif // HAVE_MPI
281  }
282 
283  void sendBorderTo_(ProcessRank peerRank)
284  {
285 #if HAVE_MPI
286  // send (local index on myRank, global index) pairs to the
287  // peers
288  BorderList::const_iterator borderIt = borderList_().begin();
289  BorderList::const_iterator borderEndIt = borderList_().end();
290  for (; borderIt != borderEndIt; ++borderIt) {
291  ProcessRank borderPeer = borderIt->peerRank;
292  BorderDistance borderDistance = borderIt->borderDistance;
293  if (borderPeer != peerRank || borderDistance != 0)
294  continue;
295 
296  Index localIdx = foreignOverlap_.nativeToLocal(borderIt->localIdx);
297  Index peerIdx = borderIt->peerIdx;
298  assert(localIdx >= 0);
299  if (foreignOverlap_.iAmMasterOf(localIdx)) {
300  sendBorderIndex(borderPeer, localIdx, peerIdx);
301  }
302  }
303 #endif // HAVE_MPI
304  }
305 
306  void receiveBorderFrom_(ProcessRank peerRank)
307  {
308 #if HAVE_MPI
309  // retrieve the global indices for which we are not master
310  // from the processes with lower rank
311  BorderList::const_iterator borderIt = borderList_().begin();
312  BorderList::const_iterator borderEndIt = borderList_().end();
313  for (; borderIt != borderEndIt; ++borderIt) {
314  ProcessRank borderPeer = borderIt->peerRank;
315  BorderDistance borderDistance = borderIt->borderDistance;
316  if (borderPeer != peerRank || borderDistance != 0)
317  continue;
318 
319  Index nativeIdx = borderIt->localIdx;
320  Index localIdx = foreignOverlap_.nativeToLocal(nativeIdx);
321  if (localIdx >= 0 && foreignOverlap_.masterRank(localIdx) == borderPeer)
322  receiveBorderIndex(borderPeer);
323  }
324 #endif // HAVE_MPI
325  }
326 
327  const PeerSet& peerSet_() const
328  { return foreignOverlap_.peerSet(); }
329 
330  const BorderList& borderList_() const
331  { return foreignOverlap_.borderList(); }
332 
333  ProcessRank myRank_;
334  size_t mpiSize_;
335 
336  int domesticOffset_;
337  size_t numDomestic_;
338  const ForeignOverlap& foreignOverlap_;
339 
340  GlobalToDomesticMap globalToDomestic_;
341  DomesticToGlobalMap domesticToGlobal_;
342 };
343 
344 } // namespace Linear
345 } // namespace Ewoms
346 
347 #endif
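
The chain of point-to-point messages in buildGlobalIndices_() assigns every rank a contiguous block of global indices: rank 0 starts at offset zero, and each following rank receives the offset at which its predecessor stopped. This amounts to an exclusive prefix sum of the per-rank master index counts. The sketch below is not part of the file and computeDomesticOffset is a placeholder name; it only illustrates that intent with a single collective call:

#include <mpi.h>

// Illustration only: compute this rank's starting offset in the global
// numbering as an exclusive prefix sum of the per-rank master counts.
int computeDomesticOffset(int numMaster)
{
    int offset = 0;
    // On rank 0, MPI_Exscan leaves the receive buffer untouched, so the
    // initialization to zero reproduces the special case handled above.
    MPI_Exscan(&numMaster, &offset, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    return offset;
}

The file itself implements this with a pairwise MPI_Recv/MPI_Send chain; the collective form is shown only for comparison.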
void sendBorderIndex(ProcessRank peerRank, Index domesticIdx, Index peerLocalIdx)
Send a border index to a remote process.
Definition: globalindices.hh:139
size_t numDomestic() const
Returns the number of domestic indices.
Definition: globalindices.hh:121
This class maps domestic row indices to and from "global" indices which is used to construct an algeb...
Definition: globalindices.hh:55
size_t numLocal() const
Returns the number of indices which are in the interior or on the border of the current rank...
Definition: globalindices.hh:112
void addIndex(Index domesticIdx, Index globalIdx)
Add an index to the domestic<->global mapping.
Definition: globalindices.hh:127
bool hasGlobalIndex(Index globalIdx) const
Return true iff a given global index already exists.
Definition: globalindices.hh:181
This file provides several data structures for storing tuples of indices of remote and/or local proc...
Index domesticToGlobal(Index domesticIdx) const
Converts a domestic index to a global one.
Definition: globalindices.hh:88
This structure stores a local index on a peer process and a global index.
Definition: overlaptypes.hh:69
Index globalToDomestic(Index globalIdx) const
Converts a global index to a domestic one.
Definition: globalindices.hh:98
void receiveBorderIndex(ProcessRank peerRank)
Receive an index on the border from a remote process and add it to the translation maps.
Definition: globalindices.hh:158
void print() const
Prints the global indices of all domestic indices for debugging purposes.
Definition: globalindices.hh:188
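
Finally, a minimal usage sketch. MyForeignOverlap and exampleUsage are placeholder names, and Index is assumed to be the index type provided by overlaptypes.hh; the only calls taken from this file are the GlobalIndices members documented above.

#include <cassert>
#include "globalindices.hh"

// Hypothetical helper: exercises the domestic<->global translation for
// every domestic index, mirroring the loop used by print().
void exampleUsage(const MyForeignOverlap& foreignOverlap)
{
    // The constructor builds the global numbering; with MPI enabled this
    // exchanges border indices with the neighboring ranks.
    Ewoms::Linear::GlobalIndices<MyForeignOverlap> globalIndices(foreignOverlap);

    using Ewoms::Linear::Index; // assumed to come from overlaptypes.hh
    for (Index domIdx = 0;
         domIdx < static_cast<Index>(globalIndices.numDomestic());
         ++domIdx) {
        Index glbIdx = globalIndices.domesticToGlobal(domIdx);
        assert(globalIndices.hasGlobalIndex(glbIdx));
        assert(globalIndices.globalToDomestic(glbIdx) == domIdx);
    }
}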