ParallelDebugOutput.hpp
/*
  Copyright 2015 IRIS AS

  This file is part of the Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM.  If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef OPM_PARALLELDEBUGOUTPUT_HEADER_INCLUDED
#define OPM_PARALLELDEBUGOUTPUT_HEADER_INCLUDED

#include <unordered_set>

#include <opm/common/data/SimulationDataContainer.hpp>

#include <opm/core/grid.h>
#include <opm/core/simulator/WellState.hpp>
#include <opm/core/wells/WellsManager.hpp>
#include <opm/core/wells/DynamicListEconLimited.hpp>

#include <opm/autodiff/WellStateFullyImplicitBlackoil.hpp>

#if HAVE_OPM_GRID
#include <dune/grid/common/p2pcommunicator.hh>
#endif

namespace Opm
{

    class ParallelDebugOutputInterface
    {
    protected:
        ParallelDebugOutputInterface() {}

    public:
        virtual ~ParallelDebugOutputInterface() {}

        // gather the local solution on the I/O rank for the EclipseWriter;
        // returns true on the I/O rank, false otherwise
        virtual bool collectToIORank( const SimulationDataContainer& localReservoirState,
                                      const WellStateFullyImplicitBlackoil& localWellState,
                                      const data::Solution& localCellData,
                                      const int wellStateStepNumber ) = 0;

        virtual const SimulationDataContainer& globalReservoirState() const = 0;
        virtual const data::Solution& globalCellData() const = 0;
        virtual const WellStateFullyImplicitBlackoil& globalWellState() const = 0;
        virtual bool isIORank() const = 0;
        virtual bool isParallel() const = 0;
        virtual int numCells() const = 0;
        virtual const int* globalCell() const = 0;
    };
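
    // Default implementation for serial runs: the single rank already sees the
    // entire domain, so "collecting" the solution just stores pointers to the
    // local state.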
    template <class GridImpl>
    class ParallelDebugOutput : public ParallelDebugOutputInterface
    {
    protected:
        const GridImpl& grid_;

        const SimulationDataContainer* globalState_;
        const WellStateFullyImplicitBlackoil* wellState_;
        const data::Solution* globalCellData_;

    public:
        ParallelDebugOutput( const GridImpl& grid,
                             const EclipseState& /* eclipseState */,
                             const int /* numPhases */,
                             const Opm::PhaseUsage& /* phaseUsage */ )
            : grid_( grid ),
              globalState_( nullptr ),
              wellState_( nullptr ),
              globalCellData_( nullptr )
        {}

        // gather solution to rank 0 for EclipseWriter
        virtual bool collectToIORank( const SimulationDataContainer& localReservoirState,
                                      const WellStateFullyImplicitBlackoil& localWellState,
                                      const data::Solution& localCellData,
                                      const int /* wellStateStepNumber */ )
        {
            globalState_    = &localReservoirState;
            wellState_      = &localWellState;
            globalCellData_ = &localCellData;
            return true;
        }

        virtual const SimulationDataContainer& globalReservoirState() const { return *globalState_; }
        virtual const data::Solution& globalCellData() const { return *globalCellData_; }
        virtual const WellStateFullyImplicitBlackoil& globalWellState() const { return *wellState_; }
        virtual bool isIORank() const { return true; }
        virtual bool isParallel() const { return false; }
        virtual int numCells() const { return Opm::AutoDiffGrid::numCells( grid_ ); }
        virtual const int* globalCell() const { return Opm::AutoDiffGrid::globalCell( grid_ ); }
    };

#if HAVE_OPM_GRID
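
    // Specialization for Dune::CpGrid: in a distributed run the interior cells
    // of every process are collected on the I/O rank (rank 0) via point-to-point
    // communication, so that a single process can write the ECLIPSE output files.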
    template <>
    class ParallelDebugOutput< Dune::CpGrid > : public ParallelDebugOutputInterface
    {
    public:
        typedef Dune::CpGrid Grid;
        typedef typename Grid :: CollectiveCommunication CollectiveCommunication;

        // global id
        class GlobalCellIndex
        {
            int globalId_;
            int localIndex_;
            bool isInterior_;
        public:
            GlobalCellIndex() : globalId_(-1), localIndex_(-1), isInterior_(true) {}
            void setGhost() { isInterior_ = false; }

            void setId( const int globalId ) { globalId_ = globalId; }
            void setIndex( const int localIndex ) { localIndex_ = localIndex; }

            int localIndex() const { return localIndex_; }
            int id() const { return globalId_; }
            bool isInterior() const { return isInterior_; }
        };

        typedef typename Dune::PersistentContainer< Grid, GlobalCellIndex > GlobalIndexContainer;

        static const int dimension = Grid :: dimension ;

        typedef typename Grid :: LeafGridView GridView;
        typedef GridView AllGridView;

        typedef Dune :: Point2PointCommunicator< Dune :: SimpleMessageBuffer > P2PCommunicatorType;
        typedef typename P2PCommunicatorType :: MessageBufferType MessageBufferType;

        typedef std::vector< GlobalCellIndex > LocalIndexMapType;

        typedef std::vector<int> IndexMapType;
        typedef std::vector< IndexMapType > IndexMapStorageType;

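        // Communication handle used once at setup to distribute the cell index
        // mapping: every process sends the global ids of its interior cells to
        // the I/O rank, which translates them into positions in the global state.
        // After the exchange, indexMaps_[ link ] maps data received on 'link'
        // to positions in the global containers.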
        class DistributeIndexMapping : public P2PCommunicatorType::DataHandleInterface
        {
        protected:
            const std::vector<int>& distributedGlobalIndex_;
            IndexMapType& localIndexMap_;
            IndexMapStorageType& indexMaps_;
            std::map< int, int > globalPosition_;
#ifndef NDEBUG
            std::set< int > checkPosition_;
#endif

        public:
            DistributeIndexMapping( const std::vector<int>& globalIndex,
                                    const std::vector<int>& distributedGlobalIndex,
                                    IndexMapType& localIndexMap,
                                    IndexMapStorageType& indexMaps )
                : distributedGlobalIndex_( distributedGlobalIndex ),
                  localIndexMap_( localIndexMap ),
                  indexMaps_( indexMaps ),
                  globalPosition_()
            {
                const size_t size = globalIndex.size();
                // create mapping globalIndex --> localIndex
                for( size_t index = 0; index < size; ++index )
                {
                    globalPosition_.insert( std::make_pair( globalIndex[ index ], index ) );
                }

                // on the I/O rank we need to create a mapping from local to global
                if( ! indexMaps_.empty() )
                {
                    // for the I/O rank, map each local index to its index in the global state
                    IndexMapType& indexMap = indexMaps_.back();
                    const size_t localSize = localIndexMap_.size();
                    indexMap.resize( localSize );
                    for( size_t i=0; i<localSize; ++i )
                    {
                        const int id = distributedGlobalIndex_[ localIndexMap_[ i ] ];
                        indexMap[ i ] = globalPosition_[ id ];
#ifndef NDEBUG
                        // every global id must be seen exactly once
                        assert( checkPosition_.find( id ) == checkPosition_.end() );
                        checkPosition_.insert( id );
#endif
                    }
                }
            }

            void pack( const int link, MessageBufferType& buffer )
            {
                // we should only get one link
                if( link != 0 ) {
                    OPM_THROW(std::logic_error,"link in method pack is not 0 as expected");
                }

                // pack all interior global cell ids
                const int size = localIndexMap_.size();
                buffer.write( size );

                for( int index = 0; index < size; ++index )
                {
                    const int globalIdx = distributedGlobalIndex_[ localIndexMap_[ index ] ];
                    buffer.write( globalIdx );
                }
            }

            void unpack( const int link, MessageBufferType& buffer )
            {
                // get index map for current link
                IndexMapType& indexMap = indexMaps_[ link ];
                assert( ! globalPosition_.empty() );

                // unpack all interior global cell ids
                int numCells = 0;
                buffer.read( numCells );
                indexMap.resize( numCells );
                for( int index = 0; index < numCells; ++index )
                {
                    int globalId = -1;
                    buffer.read( globalId );
                    assert( globalPosition_.find( globalId ) != globalPosition_.end() );
                    indexMap[ index ] = globalPosition_[ globalId ];
#ifndef NDEBUG
                    // every global id must be received exactly once
                    assert( checkPosition_.find( globalId ) == checkPosition_.end() );
                    checkPosition_.insert( globalId );
#endif
                }
            }
        };

        enum { ioRank = 0 };

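        // Set up the collective communication: rank 0 keeps a copy of the global
        // grid and allocates the global state; every rank records which of its
        // cells are interior and exchanges the index mapping with the I/O rank.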
        ParallelDebugOutput( const Dune::CpGrid& otherGrid,
                             const EclipseState& eclipseState,
                             const int numPhases,
                             const Opm::PhaseUsage& phaseUsage )
            : grid_(),
              eclipseState_( eclipseState ),
              globalCellData_( new data::Solution ),
              isIORank_( true ),
              phaseUsage_( phaseUsage )
        {
            // Switch to the distributed view unconditionally for safety.
            Dune::CpGrid distributed_grid = otherGrid;

            const CollectiveCommunication& comm = otherGrid.comm();
            if( comm.size() > 1 )
            {
                std::set< int > send, recv;
                distributed_grid.switchToDistributedView();
                toIORankComm_ = distributed_grid.comm();
                isIORank_ = (distributed_grid.comm().rank() == ioRank);

                // the I/O rank receives from all other ranks
                if( isIORank() )
                {
                    // copy grid
                    grid_.reset( new Dune::CpGrid( otherGrid ) );
                    grid_->switchToGlobalView();
                    Dune::CpGrid& globalGrid = *grid_;

                    // initialize global state with correct sizes
                    globalReservoirState_.reset( new SimulationDataContainer( globalGrid.numCells(), globalGrid.numFaces(), numPhases ));

                    // copy global cartesian index
                    globalIndex_ = globalGrid.globalCell();

                    // check that the global grid view has as many elements as
                    // entries in the global cartesian index
                    unsigned int count = 0;
                    auto gridView = globalGrid.leafGridView();
                    for( auto it = gridView.begin< 0 >(),
                         end = gridView.end< 0 >(); it != end; ++it, ++count )
                    {
                    }
                    assert( count == globalIndex_.size() );

                    for( int i=0; i<comm.size(); ++i )
                    {
                        if( i != ioRank )
                        {
                            recv.insert( i );
                        }
                    }
                }
                else // all other ranks simply send to the I/O rank
                {
                    // globalReservoirState will be dereferenced even if this rank does
                    // not output anything. To prevent dereferencing a nullptr we
                    // create an empty container.
                    globalReservoirState_.reset( new SimulationDataContainer( 0, 0, 0 ));
                    send.insert( ioRank );
                }

                localIndexMap_.clear();
                localIndexMap_.reserve( distributed_grid.size( 0 ) );

                unsigned int index = 0;
                auto localView = distributed_grid.leafGridView();
                for( auto it = localView.begin< 0 >(),
                     end = localView.end< 0 >(); it != end; ++it, ++index )
                {
                    const auto element = *it;
                    // only store interior elements for collection
                    if( element.partitionType() == Dune :: InteriorEntity )
                    {
                        localIndexMap_.push_back( index );
                    }
                }

                // insert send and recv linkage into the communicator
                toIORankComm_.insertRequest( send, recv );

                if( isIORank() )
                {
                    // need an index map for each rank
                    indexMaps_.clear();
                    indexMaps_.resize( comm.size() );
                }

                // distribute global ids to the I/O rank for later association of dofs
                DistributeIndexMapping distIndexMapping( globalIndex_, distributed_grid.globalCell(), localIndexMap_, indexMaps_ );
                toIORankComm_.exchange( distIndexMapping );
            }
            else // serial run
            {
                // copy global cartesian index
                globalIndex_ = distributed_grid.globalCell();
            }
        }

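        // Communication handle for one report step: packs the local cell data and
        // well state on each rank and unpacks them into the global containers on
        // the I/O rank, using the index maps established by DistributeIndexMapping.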
        class PackUnPackSimulationDataContainer : public P2PCommunicatorType::DataHandleInterface
        {
            const data::Solution& localCellData_;
            data::Solution& globalCellData_;
            const WellStateFullyImplicitBlackoil& localWellState_;
            WellStateFullyImplicitBlackoil& globalWellState_;
            const IndexMapType& localIndexMap_;
            const IndexMapStorageType& indexMaps_;

        public:
            PackUnPackSimulationDataContainer( std::size_t numGlobalCells,
                                               const data::Solution& localCellData,
                                               data::Solution& globalCellData,
                                               const WellStateFullyImplicitBlackoil& localWellState,
                                               WellStateFullyImplicitBlackoil& globalWellState,
                                               const IndexMapType& localIndexMap,
                                               const IndexMapStorageType& indexMaps,
                                               const bool isIORank )
                : localCellData_( localCellData ),
                  globalCellData_( globalCellData ),
                  localWellState_( localWellState ),
                  globalWellState_( globalWellState ),
                  localIndexMap_( localIndexMap ),
                  indexMaps_( indexMaps )
            {
                if( isIORank )
                {
                    // add missing data to global cell data
                    for (const auto& pair : localCellData_) {
                        const std::string& key = pair.first;
                        std::size_t container_size = numGlobalCells;
                        auto ret = globalCellData_.insert(key, pair.second.dim,
                                                          std::vector<double>(container_size),
                                                          pair.second.target);
                        assert(ret.second);
                        DUNE_UNUSED_PARAMETER(ret.second); // dummy op to prevent warning with -DNDEBUG
                    }

                    MessageBufferType buffer;
                    pack( 0, buffer );
                    // the last index map is the local one
                    doUnpack( indexMaps.back(), buffer );
                }
            }

            // pack all data associated with the link
            void pack( const int link, MessageBufferType& buffer )
            {
                // we should only get one link
                if( link != 0 ) {
                    OPM_THROW(std::logic_error,"link in method pack is not 0 as expected");
                }

                // write all cell data registered in the local state
                for (const auto& pair : localCellData_) {
                    const auto& data = pair.second.data;

                    // write all data from local data to buffer
                    write( buffer, localIndexMap_, data );
                }

                // write all data from the local well state to buffer
                writeWells( buffer );
            }

            void doUnpack( const IndexMapType& indexMap, MessageBufferType& buffer )
            {
                // loop over the cell data in the same order as in pack(), since
                // that order governs the order in which the data was received
                for (const auto& pair : localCellData_) {
                    const std::string& key = pair.first;
                    auto& data = globalCellData_.data(key);

                    // read the data from the buffer into the global cell data
                    read( buffer, indexMap, data );
                }

                // read well data from buffer
                readWells( buffer );
            }

            // unpack all data associated with the link
            void unpack( const int link, MessageBufferType& buffer )
            {
                doUnpack( indexMaps_[ link ], buffer );
            }

        protected:
            template <class Vector>
            void write( MessageBufferType& buffer, const IndexMapType& localIndexMap,
                        const Vector& vector,
                        const unsigned int offset = 0, const unsigned int stride = 1 ) const
            {
                unsigned int size = localIndexMap.size();
                buffer.write( size );
                assert( vector.size() >= stride * size );
                for( unsigned int i=0; i<size; ++i )
                {
                    const unsigned int index = localIndexMap[ i ] * stride + offset;
                    assert( index < vector.size() );
                    buffer.write( vector[ index ] );
                }
            }

            template <class Vector>
            void read( MessageBufferType& buffer,
                       const IndexMapType& indexMap,
                       Vector& vector,
                       const unsigned int offset = 0, const unsigned int stride = 1 ) const
            {
                unsigned int size = 0;
                buffer.read( size );
                assert( size == indexMap.size() );
                for( unsigned int i=0; i<size; ++i )
                {
                    const unsigned int index = indexMap[ i ] * stride + offset;
                    assert( index < vector.size() );
                    buffer.read( vector[ index ] );
                }
            }

            void writeString( MessageBufferType& buffer, const std::string& s ) const
            {
                const int size = s.size();
                buffer.write( size );
                for( int i=0; i<size; ++i )
                {
                    buffer.write( s[ i ] );
                }
            }

            void readString( MessageBufferType& buffer, std::string& s ) const
            {
                int size = -1;
                buffer.read( size );
                s.resize( size );
                for( int i=0; i<size; ++i )
                {
                    buffer.read( s[ i ] );
                }
            }

            void writeWells( MessageBufferType& buffer ) const
            {
                int nWells = localWellState_.wellMap().size();
                buffer.write( nWells );
                auto end = localWellState_.wellMap().end();
                for( auto it = localWellState_.wellMap().begin(); it != end; ++it )
                {
                    const std::string& name = it->first;
                    const int wellIdx = it->second[ 0 ];

                    // write well name
                    writeString( buffer, name );

                    // write well data
                    buffer.write( localWellState_.bhp()[ wellIdx ] );
                    buffer.write( localWellState_.thp()[ wellIdx ] );
                    const int wellRateIdx = wellIdx * localWellState_.numPhases();
                    for( int np=0; np<localWellState_.numPhases(); ++np )
                        buffer.write( localWellState_.wellRates()[ wellRateIdx + np ] );

                    // write well control
                    buffer.write( localWellState_.currentControls()[ wellIdx ] );

                    // write perfRates and perfPress; no need to figure out an index
                    // mapping here, as the ordering of the perforations is the same
                    // for the global and the local state
                    const int end_con = it->second[1] + it->second[2];

                    for( int con = it->second[1]; con < end_con; ++con )
                    {
                        buffer.write( localWellState_.perfRates()[ con ] );
                    }

                    for( int con = it->second[1]; con < end_con; ++con )
                    {
                        buffer.write( localWellState_.perfPress()[ con ] );
                    }

                    // write perfPhaseRates (number of phases deduced from the sizes)
                    const int np = localWellState_.perfPhaseRates().size() /
                                   localWellState_.perfRates().size();

                    for( int con = it->second[1]*np; con < end_con*np; ++con )
                    {
                        buffer.write( localWellState_.perfPhaseRates()[ con ] );
                    }
                }
            }

            void readWells( MessageBufferType& buffer )
            {
                int nWells = -1;
                buffer.read( nWells );
                // unpack all wells that have been sent
                std::string name;
                for( int well = 0; well < nWells; ++well )
                {
                    // read well name for local identification
                    readString( buffer, name );

                    // unpack values
                    auto it = globalWellState_.wellMap().find( name );
                    if( it == globalWellState_.wellMap().end() )
                    {
                        OPM_THROW(std::logic_error,"global state does not contain well " << name );
                    }
                    const int wellIdx = it->second[ 0 ];

                    buffer.read( globalWellState_.bhp()[ wellIdx ] );
                    buffer.read( globalWellState_.thp()[ wellIdx ] );
                    const int wellRateIdx = wellIdx * globalWellState_.numPhases();
                    for( int np=0; np<globalWellState_.numPhases(); ++np )
                        buffer.read( globalWellState_.wellRates()[ wellRateIdx + np ] );

                    // read well control
                    buffer.read( globalWellState_.currentControls()[ wellIdx ] );

                    // read perfRates and perfPress; no need to figure out an index
                    // mapping here, as the ordering of the perforations is the same
                    // for the global and the local state
                    const int end_con = it->second[1] + it->second[2];

                    for( int con = it->second[1]; con < end_con; ++con )
                    {
                        buffer.read( globalWellState_.perfRates()[ con ] );
                    }

                    for( int con = it->second[1]; con < end_con; ++con )
                    {
                        buffer.read( globalWellState_.perfPress()[ con ] );
                    }

                    // read perfPhaseRates (number of phases deduced from the sizes)
                    const int np = globalWellState_.perfPhaseRates().size() /
                                   globalWellState_.perfRates().size();

                    for( int con = it->second[1]*np; con < end_con*np; ++con )
                    {
                        buffer.read( globalWellState_.perfPhaseRates()[ con ] );
                    }
                }
            }
        };

        // gather solution to rank 0 for EclipseWriter
        bool collectToIORank( const SimulationDataContainer& /*localReservoirState*/,
                              const WellStateFullyImplicitBlackoil& localWellState,
                              const data::Solution& localCellData,
                              const int wellStateStepNumber )
        {
            if( isIORank() )
            {
                Dune::CpGrid& globalGrid = *grid_;
                // TODO: construct a dummy DynamicListEconLimited for now, to allow compilation and development
                // TODO: NOT SURE whether it will cause problems for parallel runs
                // TODO: TO BE TESTED AND IMPROVED
                const DynamicListEconLimited dynamic_list_econ_limited;
                // Create wells and well state.
                WellsManager wells_manager(eclipseState_,
                                           wellStateStepNumber,
                                           Opm::UgGridHelpers::numCells( globalGrid ),
                                           Opm::UgGridHelpers::globalCell( globalGrid ),
                                           Opm::UgGridHelpers::cartDims( globalGrid ),
                                           Opm::UgGridHelpers::dimensions( globalGrid ),
                                           Opm::UgGridHelpers::cell2Faces( globalGrid ),
                                           Opm::UgGridHelpers::beginFaceCentroids( globalGrid ),
                                           dynamic_list_econ_limited,
                                           false,
                                           // We need to pass the optional arguments explicitly,
                                           // as we otherwise get the following error
                                           // with c++ (Debian 4.9.2-10) 4.9.2 and -std=c++11:
                                           // converting to 'const std::unordered_set<std::basic_string<char> >' from initializer list would use explicit constructor
                                           std::unordered_set<std::string>());

                const Wells* wells = wells_manager.c_wells();
                globalWellState_.init(wells, *globalReservoirState_, globalWellState_, phaseUsage_ );
                globalCellData_->clear();
            }

            PackUnPackSimulationDataContainer packUnpack( numCells(),
                                                          localCellData, *globalCellData_,
                                                          localWellState, globalWellState_,
                                                          localIndexMap_, indexMaps_,
                                                          isIORank() );

            //toIORankComm_.exchangeCached( packUnpack );
            toIORankComm_.exchange( packUnpack );
#ifndef NDEBUG
            // make sure every process is on the same page
            toIORankComm_.barrier();
#endif
            if( isIORank() )
            {
                // copy values from globalCellData to globalReservoirState
                const std::map<std::string, std::vector<double> > no_extra_data;
                solutionToSim(*globalCellData_, no_extra_data, phaseUsage_, *globalReservoirState_);
            }
            return isIORank();
        }

        const SimulationDataContainer& globalReservoirState() const { return *globalReservoirState_; }

        const data::Solution& globalCellData() const
        {
            return *globalCellData_;
        }

        const WellStateFullyImplicitBlackoil& globalWellState() const { return globalWellState_; }

        bool isIORank() const
        {
            return isIORank_;
        }

        bool isParallel() const
        {
            return toIORankComm_.size() > 1;
        }

        int numCells() const { return globalIndex_.size(); }
        const int* globalCell() const
        {
            assert( ! globalIndex_.empty() );
            return globalIndex_.data();
        }

    protected:
        std::unique_ptr< Dune::CpGrid > grid_;
        const EclipseState& eclipseState_;
        P2PCommunicatorType toIORankComm_;
        IndexMapType globalIndex_;
        IndexMapType localIndexMap_;
        IndexMapStorageType indexMaps_;
        std::unique_ptr<SimulationDataContainer> globalReservoirState_;
        std::unique_ptr<data::Solution> globalCellData_;
        // this needs to be revised
        WellStateFullyImplicitBlackoil globalWellState_;
        // true if we are on the I/O rank
        bool isIORank_;
        // phase usage needed to convert the solution to a simulation data container
        Opm::PhaseUsage phaseUsage_;
    };
#endif // #if HAVE_OPM_GRID
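
    // Typical usage (a minimal sketch; the simulator's output driver is the
    // authoritative reference, the names below just follow the interface above):
    //
    //   ParallelDebugOutput< Grid > debugOutput( grid, eclipseState, numPhases, phaseUsage );
    //   debugOutput.collectToIORank( reservoirState, wellState, cellData, reportStep );
    //   if( debugOutput.isIORank() ) {
    //       // write ECLIPSE output from debugOutput.globalReservoirState(), ...
    //   }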

} // end namespace Opm

#endif // OPM_PARALLELDEBUGOUTPUT_HEADER_INCLUDED