Actual source code: ex13.c
static char help[] = "Solves a variable Poisson problem with KSP.\n\n";

/*
   Include "petscksp.h" so that we can use KSP solvers.  Note that this file
   automatically includes:
     petscsys.h    - base PETSc routines   petscvec.h - vectors
     petscmat.h    - matrices
     petscis.h     - index sets            petscksp.h - Krylov subspace methods
     petscviewer.h - viewers               petscpc.h  - preconditioners
*/
#include <petscksp.h>

/*
   User-defined context that contains all the data structures used
   in the linear solution process.
*/
typedef struct {
  Vec         x,b;     /* solution vector, right-hand-side vector */
  Mat         A;       /* sparse matrix */
  KSP         ksp;     /* linear solver context */
  PetscInt    m,n;     /* grid dimensions */
  PetscScalar hx2,hy2; /* 1/(hx*hx) = (m+1)*(m+1) and 1/(hy*hy) = (n+1)*(n+1) */
} UserCtx;

extern PetscErrorCode UserInitializeLinearSolver(PetscInt,PetscInt,UserCtx*);
extern PetscErrorCode UserFinalizeLinearSolver(UserCtx*);
extern PetscErrorCode UserDoLinearSolver(PetscScalar*,UserCtx *userctx,PetscScalar *b,PetscScalar *x);

int main(int argc,char **args)
{
  UserCtx     userctx;
  PetscInt    m = 6,n = 7,t,tmax = 2,i,Ii,j,N;
  PetscScalar *userx,*rho,*solution,*userb,hx,hy,x,y;
  PetscReal   enorm;

  /*
     Initialize the PETSc libraries
  */
  PetscInitialize(&argc,&args,(char*)0,help);
  /*
     The next two lines are for testing only; these allow the user to
     decide the grid size at runtime.
  */
  PetscOptionsGetInt(NULL,NULL,"-m",&m,NULL);
  PetscOptionsGetInt(NULL,NULL,"-n",&n,NULL);
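
  /*
     For example, assuming the compiled executable is named ex13, the grid
     size can be chosen at run time with

         ./ex13 -m 19 -n 20

     (the same options used by the test at the bottom of this file).
  */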

  /*
     Create the empty sparse matrix and linear solver data structures
  */
  UserInitializeLinearSolver(m,n,&userctx);
  N = m*n;

  /*
     Allocate arrays to hold the solution to the linear system.  This is
     not normally done in PETSc programs, but here we demonstrate calling
     PETSc routines from a program that keeps its data in ordinary
     arrays; in the context of a larger application, these arrays would
     be provided by other (non-PETSc) parts of the application code.
  */
  PetscMalloc1(N,&userx);
  PetscMalloc1(N,&userb);
  PetscMalloc1(N,&solution);

  /*
     Allocate an array to hold the coefficients in the elliptic operator
  */
  PetscMalloc1(N,&rho);

  /*
     Fill up the array rho[] with the function rho(x,y) = x; fill the
     right-hand-side b[] and the solution with a known problem for testing.
  */
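  /*
     A quick check of the manufactured solution used below: with
     rho(x,y) = x and exact solution psi(x,y) = sin(2 pi x) sin(2 pi y),

       -div (rho grad psi) = -d/dx (x dpsi/dx) - d/dy (x dpsi/dy)
                           = -2 pi cos(2 pi x) sin(2 pi y)
                             + 8 pi^2 x sin(2 pi x) sin(2 pi y),

     which is exactly the expression assigned to userb[] below.
  */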
  hx = 1.0/(m+1);
  hy = 1.0/(n+1);
  y  = hy;
  Ii = 0;
  for (j=0; j<n; j++) {
    x = hx;
    for (i=0; i<m; i++) {
      rho[Ii]      = x;
      solution[Ii] = PetscSinScalar(2.*PETSC_PI*x)*PetscSinScalar(2.*PETSC_PI*y);
      userb[Ii]    = -2*PETSC_PI*PetscCosScalar(2*PETSC_PI*x)*PetscSinScalar(2*PETSC_PI*y) +
                     8*PETSC_PI*PETSC_PI*x*PetscSinScalar(2*PETSC_PI*x)*PetscSinScalar(2*PETSC_PI*y);
      x += hx;
      Ii++;
    }
    y += hy;
  }

  /*
     Loop over a bunch of timesteps, setting up and solving the linear
     system for each timestep.

     Note this is somewhat artificial. It is intended to demonstrate how
     one may reuse the linear solver objects in each timestep.
  */
  for (t=0; t<tmax; t++) {
    UserDoLinearSolver(rho,&userctx,userb,userx);

    /*
       Compute error: Note that this could (and usually should) all be done
       using the PETSc vector operations. Here we demonstrate using more
       standard programming practices to show how they may be mixed with
       PETSc.
    */
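    /*
       The quantity printed below is hx*hy times the sum of the squared
       pointwise errors, i.e. the square of a discrete L2 norm of the
       error; no square root is taken.
    */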
    enorm = 0.0;
    for (i=0; i<N; i++) enorm += PetscRealPart(PetscConj(solution[i]-userx[i])*(solution[i]-userx[i]));
    enorm *= PetscRealPart(hx*hy);
    PetscPrintf(PETSC_COMM_WORLD,"m %D n %D error norm %g\n",m,n,(double)enorm);
  }

  /*
     We are all finished solving linear systems, so we clean up the
     data structures.
  */
  PetscFree(rho);
  PetscFree(solution);
  PetscFree(userx);
  PetscFree(userb);
  UserFinalizeLinearSolver(&userctx);
  PetscFinalize();
  return 0;
}

/* ------------------------------------------------------------------------*/
PetscErrorCode UserInitializeLinearSolver(PetscInt m,PetscInt n,UserCtx *userctx)
{
  PetscInt N;

  /*
     Here we assume use of a grid of size m x n, with all points on the
     interior of the domain, i.e., we do not include the points corresponding
     to homogeneous Dirichlet boundary conditions. We assume that the domain
     is [0,1] x [0,1].
  */
  userctx->m   = m;
  userctx->n   = n;
  userctx->hx2 = (m+1)*(m+1);
  userctx->hy2 = (n+1)*(n+1);
  N            = m*n;

  /*
     Create the sparse matrix. Preallocate 5 nonzeros per row.
  */
  MatCreateSeqAIJ(PETSC_COMM_SELF,N,N,5,NULL,&userctx->A);
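  /*
     In MatCreateSeqAIJ() above, the 5 is the estimated number of nonzeros
     per row and the NULL is an optional array giving the exact number of
     nonzeros in each row; 5 per row suffices for the five-point stencil
     assembled below (diagonal plus at most four neighbors).
  */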

  /*
     Create vectors. Here we create vectors with no memory allocated.
     This way, we can use the data structures already in the program
     by calling the VecPlaceArray() routine at a later stage.
  */
  VecCreateSeqWithArray(PETSC_COMM_SELF,1,N,NULL,&userctx->b);
  VecDuplicate(userctx->b,&userctx->x);

  /*
     Create linear solver context. This will be used repeatedly for all
     the linear solves needed.
  */
  KSPCreate(PETSC_COMM_SELF,&userctx->ksp);

  return 0;
}

/*
   Solves -div (rho grad psi) = F using finite differences.
   rho is a 2-dimensional array of size m by n, stored in Fortran
   style by columns. userb is a standard one-dimensional array.
*/
/* ------------------------------------------------------------------------*/
PetscErrorCode UserDoLinearSolver(PetscScalar *rho,UserCtx *userctx,PetscScalar *userb,PetscScalar *userx)
{
  PetscInt    i,j,Ii,J,m = userctx->m,n = userctx->n;
  Mat         A = userctx->A;
  PC          pc;
  PetscScalar v,hx2 = userctx->hx2,hy2 = userctx->hy2;

  /*
     This is not the most efficient way of generating the matrix,
     but let's not worry about it. We should have separate code for
     the four corners, each edge, and then the interior; that would
     avoid the slow if-tests inside the loop.

     Computes the operator
              -div rho grad
     on an m by n grid with zero Dirichlet boundary conditions. rho is
     assumed to be given at the same grid points where the finite
     difference stencil is applied; for a staggered grid, one would have
     to change things slightly.
  */
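  /*
     Concretely, at grid point Ii the loop below builds the conservative
     five-point stencil: each neighbor J contributes the off-diagonal
     coefficient -.5*(rho[Ii]+rho[J])/h^2 (with h the spacing in that
     direction), approximating rho at the midpoint between the two grid
     points by their average. The diagonal entry 2*rho[Ii]*(hx2+hy2)
     equals the negative sum of those averages when rho varies linearly,
     as it does for the test coefficient rho(x,y) = x used here.
  */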
  Ii = 0;
  for (j=0; j<n; j++) {
    for (i=0; i<m; i++) {
      if (j>0) {
        J = Ii - m;
        v = -.5*(rho[Ii] + rho[J])*hy2;
        MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);
      }
      if (j<n-1) {
        J = Ii + m;
        v = -.5*(rho[Ii] + rho[J])*hy2;
        MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);
      }
      if (i>0) {
        J = Ii - 1;
        v = -.5*(rho[Ii] + rho[J])*hx2;
        MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);
      }
      if (i<m-1) {
        J = Ii + 1;
        v = -.5*(rho[Ii] + rho[J])*hx2;
        MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);
      }
      v = 2.0*rho[Ii]*(hx2+hy2);
      MatSetValues(A,1,&Ii,1,&Ii,&v,INSERT_VALUES);
      Ii++;
    }
  }

  /*
     Assemble matrix
  */
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);

  /*
     Set operators. Here the matrix that defines the linear system
     also serves as the matrix from which the preconditioner is built.
     Because the same matrix (with an unchanged nonzero pattern) is
     reused for every solve, the solver can reuse internal data
     structures such as the symbolic factorization.
  */
  KSPSetOperators(userctx->ksp,A,A);

  /*
     Set linear solver defaults for this problem (optional).
     - Here we set it to use direct LU factorization for the solution.
  */
  KSPGetPC(userctx->ksp,&pc);
  PCSetType(pc,PCLU);

  /*
     Set runtime options, e.g.,
         -ksp_type <type> -pc_type <type> -ksp_monitor -ksp_rtol <rtol>
     These options will override those specified above as long as
     KSPSetFromOptions() is called _after_ any other customization
     routines.

     Run the program with the option -help to see all the possible
     linear solver options.
  */
  KSPSetFromOptions(userctx->ksp);

  /*
     This allows the PETSc linear solvers to compute the solution
     directly in the user's array rather than in the PETSc vector.

     This is essentially a hack and not highly recommended unless you
     are quite comfortable with using PETSc. In general, users should
     write their entire application using PETSc vectors rather than
     arrays.
  */
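  /*
     For reference, the more conventional copy-based alternative would
     look roughly like

         VecCreateSeq(PETSC_COMM_SELF,m*n,&b);
         VecGetArray(b,&barray);
         ... copy userb[] into barray ...
         VecRestoreArray(b,&barray);

     VecPlaceArray() below avoids those copies by wrapping the user's
     memory directly, at the cost of the caveats mentioned above.
  */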
  VecPlaceArray(userctx->x,userx);
  VecPlaceArray(userctx->b,userb);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                      Solve the linear system
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  KSPSolve(userctx->ksp,userctx->b,userctx->x);

  /*
     Put back the PETSc arrays that belong in the vectors userctx->x
     and userctx->b.
  */
  VecResetArray(userctx->x);
  VecResetArray(userctx->b);

  return 0;
}

/* ------------------------------------------------------------------------*/
PetscErrorCode UserFinalizeLinearSolver(UserCtx *userctx)
{
  /*
     We are all done and don't need to solve any more linear systems, so
     we free the work space. All PETSc objects should be destroyed when
     they are no longer needed.
  */
  KSPDestroy(&userctx->ksp);
  VecDestroy(&userctx->x);
  VecDestroy(&userctx->b);
  MatDestroy(&userctx->A);
  return 0;
}

/*TEST

   test:
      args: -m 19 -n 20 -ksp_gmres_cgs_refinement_type refine_always

TEST*/