ParallelIstlInformation.hpp
/*
  Copyright 2014, 2015 Dr. Markus Blatt - HPC-Simulation-Software & Services
  Copyright 2014, 2015 Statoil ASA
  Copyright 2015 NTNU

  This file is part of the Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM.  If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef OPM_PARALLELISTLINFORMTION_HEADER_INCLUDED
#define OPM_PARALLELISTLINFORMTION_HEADER_INCLUDED


#include <opm/core/grid.h>
#include <opm/common/ErrorMacros.hpp>
#include <boost/any.hpp>
#include <exception>

#include <algorithm>
#include <functional>
#include <limits>
#include <memory>
#include <numeric>
#include <tuple>
#include <type_traits>
#include <vector>

#if HAVE_MPI && HAVE_DUNE_ISTL

#include <opm/common/utility/platform_dependent/disable_warnings.h>
#include <mpi.h>
#include <dune/istl/owneroverlapcopy.hh>
#include <dune/common/parallel/interface.hh>
#include <dune/common/parallel/communicator.hh>
#include <dune/common/enumset.hh>
#include <opm/common/utility/platform_dependent/reenable_warnings.h>

namespace Opm
{
namespace
{

    template<class T>
    struct is_tuple
        : std::integral_constant<bool, false>
    {};
    template<typename... T>
    struct is_tuple<std::tuple<T...> >
        : std::integral_constant<bool, true>
    {};
}

/// Class that encapsulates the parallelization information needed by the
/// ISTL solvers.
class ParallelISTLInformation
{
public:
    typedef Dune::OwnerOverlapCopyCommunication<int, int>::ParallelIndexSet ParallelIndexSet;
    typedef Dune::OwnerOverlapCopyCommunication<int, int>::RemoteIndices RemoteIndices;

    /// Construct an empty instance communicating over MPI_COMM_WORLD.
    ParallelISTLInformation()
        : indexSet_(new ParallelIndexSet),
          remoteIndices_(new RemoteIndices(*indexSet_, *indexSet_, MPI_COMM_WORLD)),
          communicator_(MPI_COMM_WORLD)
    {}

    /// Construct an empty instance communicating over the given communicator.
    ParallelISTLInformation(MPI_Comm communicator)
        : indexSet_(new ParallelIndexSet),
          remoteIndices_(new RemoteIndices(*indexSet_, *indexSet_, communicator)),
          communicator_(communicator)
    {}

    /// Construct from an already set up index set and remote indices.
    ParallelISTLInformation(const std::shared_ptr<ParallelIndexSet>& indexSet,
                            const std::shared_ptr<RemoteIndices>& remoteIndices,
                            MPI_Comm communicator)
        : indexSet_(indexSet), remoteIndices_(remoteIndices), communicator_(communicator)
    {}

    /// Copy constructor; shares the index set and remote indices.
    ParallelISTLInformation(const ParallelISTLInformation& other)
        : indexSet_(other.indexSet_), remoteIndices_(other.remoteIndices_),
          communicator_(other.communicator_)
    {}

    /// The parallel index set describing the local-to-global mapping.
    std::shared_ptr<ParallelIndexSet> indexSet() const
    {
        return indexSet_;
    }

    /// The remote indices describing the overlap with other processes.
    std::shared_ptr<RemoteIndices> remoteIndices() const
    {
        return remoteIndices_;
    }

    /// The collective communication object used for global operations.
    Dune::CollectiveCommunication<MPI_Comm> communicator() const
    {
        return communicator_;
    }

    /// Copy the index set and remote indices into the given objects,
    /// optionally replicating each index for several components (e.g.
    /// several unknowns per cell).
    void copyValuesTo(ParallelIndexSet& indexSet, RemoteIndices& remoteIndices,
                      std::size_t local_component_size = 0, std::size_t num_components = 1) const
    {
        ParallelIndexSet::GlobalIndex global_component_size = local_component_size;
        if ( num_components > 1 )
        {
            ParallelIndexSet::GlobalIndex max_gi = 0;
            // compute the maximum global index
            for ( auto i = indexSet_->begin(), end = indexSet_->end(); i != end; ++i )
            {
                max_gi = std::max(max_gi, i->global());
            }
            global_component_size = max_gi + 1;
            global_component_size = communicator_.max(global_component_size);
        }
        indexSet.beginResize();
        IndexSetInserter<ParallelIndexSet> inserter(indexSet, global_component_size,
                                                    local_component_size, num_components);
        std::for_each(indexSet_->begin(), indexSet_->end(), inserter);
        indexSet.endResize();
        remoteIndices.rebuild<false>();
    }
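
    // Usage sketch (illustrative, not part of the original header): replicate
    // the cell-based index information for a linear system with, say, three
    // unknowns per cell. `info`, `num_cells` and the use of MPI_COMM_WORLD are
    // assumptions of this sketch.
    //
    //   ParallelISTLInformation::ParallelIndexSet systemIndexSet;
    //   ParallelISTLInformation::RemoteIndices    systemRemote(systemIndexSet, systemIndexSet,
    //                                                          MPI_COMM_WORLD);
    //   info.copyValuesTo(systemIndexSet, systemRemote, num_cells, 3);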

    /// Communicate values from entries owned by this process to all entries
    /// (owner, overlap and copy).
    template<class T>
    void copyOwnerToAll (const T& source, T& dest) const
    {
        typedef Dune::Combine<Dune::EnumItem<Dune::OwnerOverlapCopyAttributeSet::AttributeSet,Dune::OwnerOverlapCopyAttributeSet::owner>,Dune::EnumItem<Dune::OwnerOverlapCopyAttributeSet::AttributeSet,Dune::OwnerOverlapCopyAttributeSet::overlap>,Dune::OwnerOverlapCopyAttributeSet::AttributeSet> OwnerOverlapSet;
        typedef Dune::EnumItem<Dune::OwnerOverlapCopyAttributeSet::AttributeSet,Dune::OwnerOverlapCopyAttributeSet::owner> OwnerSet;
        typedef Dune::Combine<OwnerOverlapSet, Dune::EnumItem<Dune::OwnerOverlapCopyAttributeSet::AttributeSet,Dune::OwnerOverlapCopyAttributeSet::copy>,Dune::OwnerOverlapCopyAttributeSet::AttributeSet> AllSet;
        OwnerSet sourceFlags;
        AllSet destFlags;
        Dune::Interface interface(communicator_);
        if ( !remoteIndices_->isSynced() )
        {
            remoteIndices_->rebuild<false>();
        }
        interface.build(*remoteIndices_, sourceFlags, destFlags);
        Dune::BufferedCommunicator communicator;
        communicator.template build<T>(interface);
        communicator.template forward<CopyGatherScatter<T> >(source, dest);
        communicator.free();
    }
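
    // Usage sketch (illustrative, not part of the original header): after this
    // process has written the entries it owns in a cell-based vector, publish
    // them to the overlap/copy entries held by the other processes. `info` and
    // `num_cells` are assumptions of this sketch.
    //
    //   std::vector<double> cellPressure(num_cells);
    //   // ... fill the entries owned by this process ...
    //   info.copyOwnerToAll(cellPressure, cellPressure);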

    /// Update and return the owner mask: 1.0 for entries owned by this
    /// process, 0.0 for overlap/copy entries. The mask is resized to match
    /// the given container if necessary.
    template<class T>
    const std::vector<double>& updateOwnerMask(const T& container) const
    {
        if ( !indexSet_ )
        {
            OPM_THROW(std::runtime_error, "Trying to update owner mask without parallel information!");
        }
        if ( static_cast<std::size_t>(container.size()) != ownerMask_.size() )
        {
            ownerMask_.resize(container.size(), 1.);
            for ( auto i = indexSet_->begin(), end = indexSet_->end(); i != end; ++i )
            {
                if ( i->local().attribute() != Dune::OwnerOverlapCopyAttributeSet::owner )
                {
                    ownerMask_[i->local().local()] = 0.;
                }
            }
        }
        return ownerMask_;
    }

    /// Get the owner mask as computed by the last call to updateOwnerMask().
    const std::vector<double>& getOwnerMask() const
    {
        return ownerMask_;
    }

    /// Compute one or more global reductions over the entries owned by this
    /// process. The container, operator and value arguments may either be
    /// single objects or tuples of matching size; in the latter case all
    /// reductions are computed with a single communication step.
    template<typename Container, typename BinaryOperator, typename T>
    void computeReduction(const Container& container, BinaryOperator binaryOperator,
                          T& value) const
    {
        computeReduction(container, binaryOperator, value, is_tuple<Container>());
    }

private:

    // Dispatch for the case that the arguments are already tuples.
    template<typename Container, typename BinaryOperator, typename T>
    void computeReduction(const Container& container, BinaryOperator binaryOperator,
                          T& value, std::integral_constant<bool,true>) const
    {
        computeTupleReduction(container, binaryOperator, value);
    }

    // Dispatch for a single container: wrap container, operator and value
    // in one-element tuples.
    template<typename Container, typename BinaryOperator, typename T>
    void computeReduction(const Container& container, BinaryOperator binaryOperator,
                          T& value, std::integral_constant<bool,false>) const
    {
        std::tuple<const Container&> containers = std::tuple<const Container&>(container);
        auto values = std::make_tuple(value);
        auto operators = std::make_tuple(binaryOperator);
        computeTupleReduction(containers, operators, values);
        value = std::get<0>(values);
    }

    template<typename... Containers, typename... BinaryOperators, typename... ReturnValues>
    void computeTupleReduction(const std::tuple<Containers...>& containers,
                               std::tuple<BinaryOperators...>& operators,
                               std::tuple<ReturnValues...>& values) const
    {
        static_assert(std::tuple_size<std::tuple<Containers...> >::value ==
                      std::tuple_size<std::tuple<BinaryOperators...> >::value,
                      "We need the same number of containers and binary operators");
        static_assert(std::tuple_size<std::tuple<Containers...> >::value ==
                      std::tuple_size<std::tuple<ReturnValues...> >::value,
                      "We need the same number of containers and return values");
        if ( std::tuple_size<std::tuple<Containers...> >::value == 0 )
        {
            return;
        }
        // Copy the initial values.
        std::tuple<ReturnValues...> init = values;
        updateOwnerMask(std::get<0>(containers));
        computeLocalReduction(containers, operators, values);
        std::vector<std::tuple<ReturnValues...> > receivedValues(communicator_.size());
        communicator_.allgather(&values, 1, &(receivedValues[0]));
        values = init;
        for ( auto rvals = receivedValues.begin(), endvals = receivedValues.end(); rvals != endvals;
              ++rvals )
        {
            computeGlobalReduction(*rvals, operators, values);
        }
    }

    // Terminates the recursion over the tuple elements.
    template<int I=0, typename... BinaryOperators, typename... ReturnValues>
    typename std::enable_if<I == sizeof...(BinaryOperators), void>::type
    computeGlobalReduction(const std::tuple<ReturnValues...>&,
                           std::tuple<BinaryOperators...>&,
                           std::tuple<ReturnValues...>&) const
    {}

    // Combines the values received from another process into the result tuple.
    template<int I=0, typename... BinaryOperators, typename... ReturnValues>
    typename std::enable_if<I != sizeof...(BinaryOperators), void>::type
    computeGlobalReduction(const std::tuple<ReturnValues...>& receivedValues,
                           std::tuple<BinaryOperators...>& operators,
                           std::tuple<ReturnValues...>& values) const
    {
        auto& val = std::get<I>(values);
        val = std::get<I>(operators).localOperator()(val, std::get<I>(receivedValues));
        computeGlobalReduction<I+1>(receivedValues, operators, values);
    }

    // Terminates the recursion over the tuple elements.
    template<int I=0, typename... Containers, typename... BinaryOperators, typename... ReturnValues>
    typename std::enable_if<I == sizeof...(Containers), void>::type
    computeLocalReduction(const std::tuple<Containers...>&,
                          std::tuple<BinaryOperators...>&,
                          std::tuple<ReturnValues...>&) const
    {}

    // Computes the local (per-process) reduction of the I-th container.
    template<int I=0, typename... Containers, typename... BinaryOperators, typename... ReturnValues>
    typename std::enable_if<I != sizeof...(Containers), void>::type
    computeLocalReduction(const std::tuple<Containers...>& containers,
                          std::tuple<BinaryOperators...>& operators,
                          std::tuple<ReturnValues...>& values) const
    {
        const auto& container = std::get<I>(containers);
        if ( container.size() )
        {
            auto& reduceOperator = std::get<I>(operators);
            // Eigen::Block does not support STL iterators, therefore we rely
            // on the stronger random-access property of the containers
            // instead. This should be safe, too. The iterator-based code is
            // kept commented out in the hope that Eigen improves in this
            // regard in the future.
            //auto newVal = container.begin();
            auto mask = ownerMask_.begin();
            auto& value = std::get<I>(values);
            value = reduceOperator.getInitialValue();

            for ( auto endVal = ownerMask_.end(); mask != endVal;
                  /*++newVal,*/ ++mask )
            {
                value = reduceOperator(value, container[mask - ownerMask_.begin()], *mask);
            }
        }
        computeLocalReduction<I+1>(containers, operators, values);
    }

    template<typename T>
    struct CopyGatherScatter
    {
        typedef typename Dune::CommPolicy<T>::IndexedType V;

        static V gather(const T& a, std::size_t i)
        {
            return a[i];
        }

        static void scatter(T& a, V v, std::size_t i)
        {
            a[i] = v;
        }
    };

    /// Functor that inserts the index pairs of one index set into another,
    /// optionally replicated for several components.
    template<class T>
    class IndexSetInserter
    {
    public:
        typedef T ParallelIndexSet;
        typedef typename ParallelIndexSet::LocalIndex LocalIndex;
        typedef typename ParallelIndexSet::GlobalIndex GlobalIndex;

        IndexSetInserter(ParallelIndexSet& indexSet, const GlobalIndex& component_size,
                         std::size_t local_component_size, std::size_t num_components)
            : indexSet_(&indexSet), component_size_(component_size),
              local_component_size_(local_component_size),
              num_components_(num_components)
        {}
        void operator()(const typename ParallelIndexSet::IndexPair& pair)
        {
            for (std::size_t i = 0; i < num_components_; i++)
                indexSet_->add(i * component_size_ + pair.global(),
                               LocalIndex(i * local_component_size_ + pair.local(),
                                          pair.local().attribute()));
        }
    private:
        ParallelIndexSet* indexSet_;
        /// The global number of unknowns per component.
        GlobalIndex component_size_;
        /// The local number of unknowns per component.
        std::size_t local_component_size_;
        /// The number of components.
        std::size_t num_components_;
    };

    std::shared_ptr<ParallelIndexSet> indexSet_;
    std::shared_ptr<RemoteIndices> remoteIndices_;
    Dune::CollectiveCommunication<MPI_Comm> communicator_;
    mutable std::vector<double> ownerMask_;
};
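
// Usage sketch (illustrative, not part of the original header): compute the
// global sum of a cell-based quantity over the cells owned by any process,
// using one of the functors from the Reduction namespace below. `info` and
// `porevolume` are assumptions of this sketch.
//
//   std::vector<double> porevolume = /* one value per local cell */;
//   double total_pv = 0.0;
//   info.computeReduction(porevolume,
//                         Opm::Reduction::makeGlobalSumFunctor<double>(),
//                         total_pv);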

namespace Reduction
{
    /// An operator for computing a parallel reduction that multiplies each
    /// value by its mask value before passing it to the reduction operation.
    template<typename BinaryOperator>
    struct MaskIDOperator
    {
        // Note that numeric_limits needs a type without const or reference
        // qualifiers, otherwise we get complete nonsense.
        typedef typename std::remove_cv<
            typename std::remove_reference<typename BinaryOperator::result_type>::type
            >::type Result;

        template<class T, class T1>
        T operator()(const T& t1, const T& t2, const T1& mask)
        {
            return b_(t1, maskValue(t2, mask));
        }
        template<class T, class T1>
        T maskValue(const T& t, const T1& mask)
        {
            return t*mask;
        }
        BinaryOperator& localOperator()
        {
            return b_;
        }
        Result getInitialValue()
        {
            return Result();
        }
    private:
        BinaryOperator b_;
    };

    /// A functor for computing a global inner product of a masked vector with
    /// itself, i.e. the sum of the squares of the owned entries.
    template<class T>
    struct InnerProductFunctor
    {
        template<class T1>
        T operator()(const T& t1, const T& t2, const T1& mask)
        {
            T masked = maskValue(t2, mask);
            return t1 + masked * masked;
        }
        template<class T1>
        T maskValue(const T& t, const T1& mask)
        {
            return t*mask;
        }
        std::plus<T> localOperator()
        {
            return std::plus<T>();
        }
        T getInitialValue()
        {
            return T();
        }
    };

    /// An operator for computing a parallel reduction that replaces
    /// masked-out values by the smallest representable value before passing
    /// them to the reduction operation (used e.g. for global maxima).
    template<typename BinaryOperator>
    struct MaskToMinOperator
    {
        // Note that numeric_limits needs a type without const or reference
        // qualifiers, otherwise we get complete nonsense.
        typedef typename std::remove_reference<
            typename std::remove_const<typename BinaryOperator::result_type>::type
            >::type Result;

        MaskToMinOperator(BinaryOperator b)
            : b_(b)
        {}

        template<class T, class T1>
        T operator()(const T& t1, const T& t2, const T1& mask)
        {
            return b_(t1, maskValue(t2, mask));
        }
        template<class T, class T1>
        T maskValue(const T& t, const T1& mask)
        {
            if ( mask )
            {
                return t;
            }
            else
            {
                return getInitialValue();
            }
        }
        Result getInitialValue()
        {
            // g++-4.4 does not support std::numeric_limits<T>::lowest();
            // we rely on IEEE 754 for floating point values and use min()
            // for integral types.
            if ( std::is_integral<Result>::value )
            {
                return std::numeric_limits<Result>::min();
            }
            else
            {
                return -std::numeric_limits<Result>::max();
            }
        }
        BinaryOperator& localOperator()
        {
            return b_;
        }
    private:
        BinaryOperator b_;
    };

    /// An operator for computing a parallel reduction that replaces
    /// masked-out values by the largest representable value before passing
    /// them to the reduction operation (used e.g. for global minima).
    template<typename BinaryOperator>
    struct MaskToMaxOperator
    {
        // Note that numeric_limits needs a type without const or reference
        // qualifiers, otherwise we get complete nonsense.
        typedef typename std::remove_cv<
            typename std::remove_reference<typename BinaryOperator::result_type>::type
            >::type Result;

        MaskToMaxOperator(BinaryOperator b)
            : b_(b)
        {}

        template<class T, class T1>
        T operator()(const T& t1, const T& t2, const T1& mask)
        {
            return b_(t1, maskValue(t2, mask));
        }
        template<class T, class T1>
        T maskValue(const T& t, const T1& mask)
        {
            if ( mask )
            {
                return t;
            }
            else
            {
                return std::numeric_limits<T>::max();
            }
        }
        BinaryOperator& localOperator()
        {
            return b_;
        }
        Result getInitialValue()
        {
            return std::numeric_limits<Result>::max();
        }
    private:
        BinaryOperator b_;
    };

    /// Create a functor for computing a global sum.
    template<class T>
    MaskIDOperator<std::plus<T> >
    makeGlobalSumFunctor()
    {
        return MaskIDOperator<std::plus<T> >();
    }

    /// Create a functor for computing a global maximum.
    template<class T>
    MaskToMinOperator<std::pointer_to_binary_function<const T&,const T&,const T&> >
    makeGlobalMaxFunctor()
    {
        return MaskToMinOperator<std::pointer_to_binary_function<const T&,const T&,const T&> >
            (std::pointer_to_binary_function<const T&,const T&,const T&>
                ((const T&(*)(const T&, const T&))std::max<T>));
    }

    namespace detail
    {
        template<typename T, typename Enable = void>
        struct MaxAbsFunctor
        {
            using result_type = T;
            result_type operator()(const T& t1,
                                   const T& t2)
            {
                return std::max(std::abs(t1), std::abs(t2));
            }
        };

        // Specialization for unsigned integers. They need their own
        // version since abs(x) is ambiguous (as well as somewhat
        // meaningless).
        template<typename T>
        struct MaxAbsFunctor<T, typename std::enable_if<std::is_unsigned<T>::value>::type>
        {
            using result_type = T;
            result_type operator()(const T& t1,
                                   const T& t2)
            {
                return std::max(t1, t2);
            }
        };
    }

    /// Create a functor for computing a global L-infinity norm.
    template<class T>
    MaskIDOperator<detail::MaxAbsFunctor<T> >
    makeLInfinityNormFunctor()
    {
        return MaskIDOperator<detail::MaxAbsFunctor<T> >();
    }

    /// Create a functor for computing a global minimum.
    template<class T>
    MaskToMaxOperator<std::pointer_to_binary_function<const T&,const T&,const T&> >
    makeGlobalMinFunctor()
    {
        return MaskToMaxOperator<std::pointer_to_binary_function<const T&,const T&,const T&> >
            (std::pointer_to_binary_function<const T&,const T&,const T&>
                ((const T&(*)(const T&, const T&))std::min<T>));
    }

    /// Create a functor for computing a global inner product.
    template<class T>
    InnerProductFunctor<T>
    makeInnerProductFunctor()
    {
        return InnerProductFunctor<T>();
    }
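
    // Usage sketch (illustrative, not part of the original header): several
    // reductions can be evaluated in one pass (and one allgather) by passing
    // tuples of containers, functors and initial values to
    // ParallelISTLInformation::computeReduction. `info`, `pv` and `temp`
    // (cell-based vectors of equal size) are assumptions of this sketch.
    //
    //   auto operators = std::make_tuple(makeGlobalSumFunctor<double>(),
    //                                    makeGlobalMaxFunctor<double>());
    //   auto values    = std::make_tuple(0.0, -std::numeric_limits<double>::max());
    //   info.computeReduction(std::tie(pv, temp), operators, values);
    //   // std::get<0>(values): global sum of pv over owned cells,
    //   // std::get<1>(values): global maximum of temp over owned cells.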
} // end namespace Reduction
} // end namespace Opm

#endif // HAVE_MPI && HAVE_DUNE_ISTL

namespace Opm
{

/// Extract the parallelization information from the grid into anyComm.
/// An UnstructuredGrid is never distributed, so this overload does nothing.
inline void extractParallelGridInformationToISTL(boost::any& anyComm, const UnstructuredGrid& grid)
{
    (void)anyComm; (void)grid;
}

/// Accumulate the values of a container. If a mask container is given, each
/// entry is multiplied by the corresponding mask value first (so that, e.g.,
/// only owned entries contribute when the owner mask is used).
template<class T1>
auto
accumulateMaskedValues(const T1& container, const std::vector<double>* maskContainer)
    -> decltype(container[0]*(*maskContainer)[0])
{
    decltype(container[0]*(*maskContainer)[0]) initial = 0;

    if ( maskContainer )
    {
        return std::inner_product(container.begin(), container.end(), maskContainer->begin(),
                                  initial);
    }
    else
    {
        return std::accumulate(container.begin(), container.end(), initial);
    }
}
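
// Usage sketch (illustrative, not part of the original header): sum a cell
// quantity over the cells owned by this process only, by combining the owner
// mask of a ParallelISTLInformation `info` with accumulateMaskedValues.
// `info` and `porevolume` are assumptions of this sketch.
//
//   const std::vector<double>& mask = info.updateOwnerMask(porevolume);
//   double owned_sum = accumulateMaskedValues(porevolume, &mask);
//   double plain_sum = accumulateMaskedValues(porevolume, nullptr); // no mask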
} // end namespace Opm

#endif // OPM_PARALLELISTLINFORMTION_HEADER_INCLUDED