Actual source code: ex106.c


static char help[] = "Test repeated LU factorizations. Used for checking memory leak\n\
  -m <size> : problem size\n\
  -mat_nonsym : use nonsymmetric matrix (default is symmetric)\n\n";
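
/*
   A typical invocation (the executable name and process count here are
   illustrative, not part of the source):
       mpiexec -n 2 ./ex106 -m 4 -mat_nonsym
   Since this test exists to check for memory leaks, it pairs naturally with
   PETSc's built-in memory logging, e.g. the -malloc_dump option.
*/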

#include <petscmat.h>
int main(int argc,char **args)
{
  Mat            C,F;              /* matrix and its LU factor */
  Vec            x,u,b;            /* approx solution, RHS, exact solution */
  PetscReal      norm;             /* norm of solution error */
  PetscScalar    v,none = -1.0;
  PetscInt       I,J,ldim,low,high,iglobal,Istart,Iend;
  PetscInt       i,j,m = 3,n = 2;
  PetscMPIInt    size,rank;
  PetscBool      mat_nonsymmetric;
  PetscInt       its_max;
  MatFactorInfo  factinfo;
  IS             perm,iperm;

  PetscInitialize(&argc,&args,(char*)0,help);
  PetscOptionsGetInt(NULL,NULL,"-m",&m,NULL);
  MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
  MPI_Comm_size(PETSC_COMM_WORLD,&size);
  n    = 2*size;
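  /*
     With n = 2*size the global problem size is m*n = 2*m*size, so the even
     PETSC_DECIDE row partitioning below gives every rank exactly 2*m rows.
  */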

  /*
     Set flag if we are doing a nonsymmetric problem; the default is symmetric.
  */
  PetscOptionsHasName(NULL,NULL,"-mat_nonsym",&mat_nonsymmetric);

  /*
     Create parallel matrix, specifying only its global dimensions.
     When using MatCreate(), the matrix format can be specified at
     runtime. Also, the parallel partitioning of the matrix is
     determined by PETSc at runtime.
  */
  MatCreate(PETSC_COMM_WORLD,&C);
  MatSetSizes(C,PETSC_DECIDE,PETSC_DECIDE,m*n,m*n);
  MatSetFromOptions(C);
  MatSetUp(C);   /* allocate with default preallocation before any MatSetValues() */
  MatGetOwnershipRange(C,&Istart,&Iend);

  /*
     Set matrix entries in parallel.
      - Each processor needs to insert only elements that it owns
        locally (but any non-local elements will be sent to the
        appropriate processor during matrix assembly).
      - Always specify global rows and columns of matrix entries.
  */
  for (I=Istart; I<Iend; I++) {
    v = -1.0; i = I/n; j = I - i*n;
    if (i>0)   {J = I - n; MatSetValues(C,1,&I,1,&J,&v,ADD_VALUES);}
    if (i<m-1) {J = I + n; MatSetValues(C,1,&I,1,&J,&v,ADD_VALUES);}
    if (j>0)   {J = I - 1; MatSetValues(C,1,&I,1,&J,&v,ADD_VALUES);}
    if (j<n-1) {J = I + 1; MatSetValues(C,1,&I,1,&J,&v,ADD_VALUES);}
    v = 4.0; MatSetValues(C,1,&I,1,&I,&v,ADD_VALUES);
  }
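  /*
     The loop above builds the standard 5-point stencil of the 2-D Laplacian
     on an m x n grid: 4 on the diagonal and -1 for each north/south/east/west
     neighbor that exists.
  */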

  /*
     Make the matrix nonsymmetric if desired
  */
  if (mat_nonsymmetric) {
    for (I=Istart; I<Iend; I++) {
      v = -1.5; i = I/n;
      if (i>1)   {J = I-n-1; MatSetValues(C,1,&I,1,&J,&v,ADD_VALUES);}
    }
  } else {
    MatSetOption(C,MAT_SYMMETRIC,PETSC_TRUE);
    MatSetOption(C,MAT_SYMMETRY_ETERNAL,PETSC_TRUE);
  }
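  /*
     MAT_SYMMETRY_ETERNAL tells PETSc that the symmetry flag remains valid
     even after the matrix values change, so symmetry is never re-tested.
  */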

  /*
     Assemble matrix, using the 2-step process:
       MatAssemblyBegin(), MatAssemblyEnd()
     Computations can be done while messages are in transit
     by placing code between these two statements.
  */
  MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
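  /*
     Note that every insertion above uses ADD_VALUES; PETSc does not allow
     mixing ADD_VALUES and INSERT_VALUES without an intervening flush
     assembly (MatAssemblyBegin/End with MAT_FLUSH_ASSEMBLY).
  */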

  /*
     Create parallel vectors.
      - When using VecSetSizes(), we specify only the vector's global
        dimension; the parallel partitioning is determined at runtime.
      - Note: We form 1 vector from scratch and then duplicate as needed.
  */
  VecCreate(PETSC_COMM_WORLD,&u);
  VecSetSizes(u,PETSC_DECIDE,m*n);
  VecSetFromOptions(u);
  VecDuplicate(u,&b);
  VecDuplicate(b,&x);

  /*
     Currently, all parallel PETSc vectors are partitioned into
     contiguous chunks across the processors.  Determine which
     range of entries is locally owned.
  */
  VecGetOwnershipRange(x,&low,&high);

  /*
    Set elements within the exact solution vector in parallel.
     - Each processor needs to insert only elements that it owns
       locally (but any non-local entries will be sent to the
       appropriate processor during vector assembly).
     - Always specify global locations of vector entries.
  */
  VecGetLocalSize(x,&ldim);
  for (i=0; i<ldim; i++) {
    iglobal = i + low;
    v       = (PetscScalar)(i + 100*rank);
    VecSetValues(u,1,&iglobal,&v,INSERT_VALUES);
  }
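  /*
     u, b, and x are duplicates of one another and therefore share the same
     parallel layout, so the ownership range queried on x above is equally
     valid for the insertions into u.
  */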

  /*
     Assemble vector, using the 2-step process:
       VecAssemblyBegin(), VecAssemblyEnd()
     Computations can be done while messages are in transit,
     by placing code between these two statements.
  */
  VecAssemblyBegin(u);
  VecAssemblyEnd(u);

  /* Compute right-hand-side vector */
  MatMult(C,u,b);
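  /*
     With b = C*u, the vector u is by construction the exact solution of
     C*x = b, so the factored solves below can be checked against it.
  */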

  MatGetOrdering(C,MATORDERINGNATURAL,&perm,&iperm);
  MatFactorInfoInitialize(&factinfo); /* factinfo must be initialized before the symbolic factorization reads it */
  its_max = 2000;
  for (i=0; i<its_max; i++) {
    MatGetFactor(C,MATSOLVERPETSC,MAT_FACTOR_LU,&F);
    MatLUFactorSymbolic(F,C,perm,iperm,&factinfo);
    /* the inner bound can be raised to repeat the numeric factorization per symbolic factorization */
    for (j=0; j<1; j++) {
      MatLUFactorNumeric(F,C,&factinfo);
    }
    MatSolve(F,b,x);
    MatDestroy(&F);
  }
  ISDestroy(&perm);
  ISDestroy(&iperm);
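  /*
     Each pass through the loop above creates, uses, and destroys a fresh
     factor F, so resident memory should stay flat across all its_max
     iterations; running with -malloc_dump is one way to confirm that
     nothing leaked.
  */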

  /* Check the error */
  VecAXPY(x,none,u);
  VecNorm(x,NORM_2,&norm);
  PetscPrintf(PETSC_COMM_WORLD,"Norm of error %g\n",(double)norm);

  /* Free work space. */
  VecDestroy(&u);
  VecDestroy(&x);
  VecDestroy(&b);
  MatDestroy(&C);
  PetscFinalize();
  return 0;
}