Actual source code: ex4f.F

!
!     This introductory example illustrates running PETSc on a subset
!     of processes
!
! -----------------------------------------------------------------------

      program main
#include <petsc/finclude/petscsys.h>
      use petscmpi  ! or mpi or mpi_f08
      use petscsys
      implicit none
      PetscErrorCode ierr
      PetscMPIInt    rank,size,grank,zero,two
      PetscReal globalrank

!     We must call MPI_Init() first, making us, not PETSc, responsible
!     for MPI

      call MPI_Init(ierr)
#if defined(PETSC_HAVE_ELEMENTAL)
      call PetscElementalInitializePackage(ierr)
#endif
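!     (When PETSc is built with Elemental, this example initializes the
!     Elemental package by hand here, between MPI_Init() and
!     PetscInitialize())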
!     We can now change the communicator universe for PETSc

      zero = 0
      two = 2
      call MPI_Comm_rank(MPI_COMM_WORLD,rank,ierr)
      call MPI_Comm_split(MPI_COMM_WORLD,mod(rank,two),zero,            &
     &     PETSC_COMM_WORLD,ierr)
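
!     Note: mod(rank,two) is the MPI_Comm_split() "color", so processes
!     with even and odd ranks in MPI_COMM_WORLD end up in two separate
!     subcommunicators; the key of zero means each subcommunicator
!     keeps the relative rank ordering of MPI_COMM_WORLD. PETSc will
!     run independently on each half.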

!     Every PETSc program should begin with a call to PetscInitialize();
!     here the no-argument variant PetscInitializeNoArguments() is used
      call PetscInitializeNoArguments(ierr)
      if (ierr .ne. 0) then
         print*,'Unable to initialize PETSc'
         stop
      endif
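
!     The error code is checked directly because PETSc's own error
!     handling is not usable until initialization has succeeded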

!     The following MPI calls return the number of processes being used
!     and the rank of this process in the group

      call MPI_Comm_size(PETSC_COMM_WORLD,size,ierr)
      call MPI_Comm_rank(PETSC_COMM_WORLD,rank,ierr)
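
!     Note: size and rank here refer to PETSC_COMM_WORLD, i.e. to this
!     process's subcommunicator, not to MPI_COMM_WORLD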

!     Here we would like to print only one message that represents all
!     the processes in the group. Sleep so that I/O from different ranks
!     does not get interleaved; note this is not an ideal solution
      call MPI_Comm_rank(MPI_COMM_WORLD,grank,ierr)
      globalrank = grank
      call PetscSleep(globalrank,ierr)
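!     PetscSleep() pauses each process for globalrank seconds, so no
!     two processes in MPI_COMM_WORLD write at the same time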
      if (rank .eq. 0) write(6,100) size,rank
 100  format('No of Procs = ',i4,' rank = ',i4)
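
!     Because MPI_COMM_WORLD was split in two, each subcommunicator has
!     its own rank 0, so two such lines appear (with 5 processes the
!     group sizes are 3 and 2)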

!     Always call PetscFinalize() before exiting a program.  This
!     routine finalizes the PETSc libraries and provides summary and
!     diagnostic information if certain runtime options are chosen
!     (e.g., -log_view). Because we, not PETSc, called MPI_Init(), it
!     does not finalize MPI. See the PetscFinalize() manpage for more
!     information.

      call PetscFinalize(ierr)
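!     The subcommunicator created by MPI_Comm_split() is ours to free;
!     PetscFinalize() does not free it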
      call MPI_Comm_free(PETSC_COMM_WORLD,ierr)
#if defined(PETSC_HAVE_ELEMENTAL)
      call PetscElementalFinalizePackage(ierr)
#endif

!     Since we initialized MPI, we must call MPI_Finalize()

      call MPI_Finalize(ierr)
      end

!/*TEST
!
!   test:
!      nsize: 5
!      filter: sort -b
!      filter_output: sort -b
!      requires: !cuda !saws
!
!TEST*/
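
!     A typical manual run, assuming the executable is named ex4f and
!     the MPI launcher is mpiexec:
!        mpiexec -n 5 ./ex4f
!     Sorted output (as in the test above) shows one line from the
!     rank-0 process of each of the two subcommunicators.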