Actual source code: dgefa4.c
/*
   Inverts a 4 by 4 matrix using Gaussian elimination with partial pivoting.

   Used by the sparse factorization routines in
     src/mat/impls/baij/seq

   This is a combination of the LINPACK routines
   dgefa() and dgedi() specialized for a size of 4.

*/
#include <petscsys.h>
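/*
   Minimal usage sketch (illustrative only; the block values and calling context are
   hypothetical). The kernel defined below inverts, in place, a dense 4x4 block stored as
   16 contiguous MatScalar entries in column-major (LINPACK/Fortran) order, with the
   diagonal at a[0], a[5], a[10], a[15]. Passing shift = 0.0 disables the diagonal-shift
   fallback; allowzeropivot = PETSC_TRUE reports a zero pivot through zeropivotdetected
   instead of raising an error.

     MatScalar      blk[16] = {4.,0.,0.,0., 0.,3.,0.,0., 0.,0.,2.,0., 0.,0.,0.,1.};
     PetscBool      zeropivot;
     PetscErrorCode ierr;

     ierr = PetscKernel_A_gets_inverse_A_4(blk,0.0,PETSC_TRUE,&zeropivot);
     if (ierr) return ierr;
     if (zeropivot) {
       // the block was numerically singular; blk does not hold a usable inverse
     }
     // otherwise blk now holds the inverse: diag(0.25, 1./3., 0.5, 1.0)
*/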
PETSC_EXTERN PetscErrorCode PetscKernel_A_gets_inverse_A_4(MatScalar *a,PetscReal shift,PetscBool allowzeropivot,PetscBool *zeropivotdetected)
{
  PetscInt  i__2,i__3,kp1,j,k,l,ll,i,ipvt[4],kb,k3;
  PetscInt  k4,j3;
  MatScalar *aa,*ax,*ay,work[16],stmp;
  MatReal   tmp,max;

  if (zeropivotdetected) *zeropivotdetected = PETSC_FALSE;
  shift = .25*shift*(1.e-12 + PetscAbsScalar(a[0]) + PetscAbsScalar(a[5]) + PetscAbsScalar(a[10]) + PetscAbsScalar(a[15]));

  /* Parameter adjustments */
  a -= 5;
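  /* From here on the code keeps the original LINPACK 1-based, column-major indexing:
     entry (row l, column k) of the block is addressed as a[l + 4*k], and the base-pointer
     shift above makes that expression land on element (l-1) + 4*(k-1) of the caller's
     0-based array. */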
  for (k = 1; k <= 3; ++k) {
    kp1 = k + 1;
    k3  = 4*k;
    k4  = k3 + k;

    /* find l = pivot index */
    i__2 = 5 - k;
    aa   = &a[k4];
    max  = PetscAbsScalar(aa[0]);
    l    = 1;
    for (ll=1; ll<i__2; ll++) {
      tmp = PetscAbsScalar(aa[ll]);
      if (tmp > max) { max = tmp; l = ll+1;}
    }
    l        += k - 1;
    ipvt[k-1] = l;

    if (a[l + k3] == 0.0) {
      if (shift == 0.0) {
        if (allowzeropivot) {
          PetscInfo(NULL,"Zero pivot, row %" PetscInt_FMT "\n",k-1);
          if (zeropivotdetected) *zeropivotdetected = PETSC_TRUE;
        } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_MAT_LU_ZRPVT,"Zero pivot, row %" PetscInt_FMT,k-1);
      } else {
        /* SHIFT is applied to SINGLE diagonal entry; does this make any sense? */
        a[l + k3] = shift;
      }
    }

    /* interchange if necessary */
    if (l != k) {
      stmp      = a[l + k3];
      a[l + k3] = a[k4];
      a[k4]     = stmp;
    }

    /* compute multipliers */
    stmp = -1. / a[k4];
    i__2 = 4 - k;
    aa   = &a[1 + k4];
    for (ll=0; ll<i__2; ll++) aa[ll] *= stmp;
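    /* the subdiagonal of column k now holds the negated elimination multipliers,
       i.e. the strictly lower triangular factor L stored in place */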
    /* row elimination with column indexing */
    ax = &a[k4+1];
    for (j = kp1; j <= 4; ++j) {
      j3   = 4*j;
      stmp = a[l + j3];
      if (l != k) {
        a[l + j3] = a[k + j3];
        a[k + j3] = stmp;
      }

      i__3 = 4 - k;
      ay   = &a[1+k+j3];
      for (ll=0; ll<i__3; ll++) ay[ll] += stmp*ax[ll];
    }
  }
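  /* a now holds the LU factorization of the row-permuted block: U in the upper triangle
     (including the diagonal) and the negated multipliers of L below it, with the row
     interchanges recorded in ipvt[] */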
  ipvt[3] = 4;
  if (a[20] == 0.0) {
    if (PetscLikely(allowzeropivot)) {
      PetscInfo(NULL,"Zero pivot, row 3\n");
      if (zeropivotdetected) *zeropivotdetected = PETSC_TRUE;
    } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_MAT_LU_ZRPVT,"Zero pivot, row 3");
  }

  /* Now form the inverse */
  /* compute inverse(u) */
  for (k = 1; k <= 4; ++k) {
    k3    = 4*k;
    k4    = k3 + k;
    a[k4] = 1.0 / a[k4];
    stmp  = -a[k4];
    i__2  = k - 1;
    aa    = &a[k3 + 1];
    for (ll=0; ll<i__2; ll++) aa[ll] *= stmp;
    kp1 = k + 1;
    if (4 < kp1) continue;
    ax = aa;
    for (j = kp1; j <= 4; ++j) {
      j3        = 4*j;
      stmp      = a[k + j3];
      a[k + j3] = 0.0;
      ay        = &a[j3 + 1];
      for (ll=0; ll<k; ll++) ay[ll] += stmp*ax[ll];
    }
  }
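  /* the upper triangle now contains inverse(U); the strictly lower triangle still holds
     the multipliers of L from the factorization above */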
  /* form inverse(u)*inverse(l) */
  for (kb = 1; kb <= 3; ++kb) {
    k   = 4 - kb;
    k3  = 4*k;
    kp1 = k + 1;
    aa  = a + k3;
    for (i = kp1; i <= 4; ++i) {
      work[i-1] = aa[i];
      aa[i]     = 0.0;
    }
    for (j = kp1; j <= 4; ++j) {
      stmp  = work[j-1];
      ax    = &a[4*j + 1];
      ay    = &a[k3 + 1];
      ay[0] += stmp*ax[0];
      ay[1] += stmp*ax[1];
      ay[2] += stmp*ax[2];
      ay[3] += stmp*ax[3];
    }
    l = ipvt[k-1];
    if (l != k) {
      ax = &a[k3 + 1];
      ay = &a[4*l + 1];
      stmp = ax[0]; ax[0] = ay[0]; ay[0] = stmp;
      stmp = ax[1]; ax[1] = ay[1]; ay[1] = stmp;
      stmp = ax[2]; ax[2] = ay[2]; ay[2] = stmp;
      stmp = ax[3]; ax[3] = ay[3]; ay[3] = stmp;
    }
  }
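  /* the column interchanges above, driven by ipvt[] in reverse order, undo the row pivoting
     of the factorization, so a now holds inverse(A) in place */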
  return 0;
}
#if defined(PETSC_HAVE_SSE)
#include PETSC_HAVE_SSE

PETSC_EXTERN PetscErrorCode PetscKernel_A_gets_inverse_A_4_SSE(float *a)
{
  /*
     This routine is converted from Intel's Small Matrix Library.
     See: Streaming SIMD Extensions -- Inverse of 4x4 Matrix
          Order Number: 245043-001
          March 1999
          https://www.intel.com/content/www/us/en/homepage.html

     Inverse of a 4x4 matrix via Cramer's rule:
       bool Invert4x4(SMLXMatrix &);
  */
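  /* Overview: unlike the pivoted kernel above, this routine inverts the 4x4 single-precision
     block in place by forming its cofactors (Cramer's rule), multiplying by the reciprocal of
     the determinant, and writing the result back over a[0..15]. There is no pivoting, no shift
     and no zero-determinant check, so the caller is responsible for the block being well
     conditioned. */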
  SSE_SCOPE_BEGIN;
  SSE_INLINE_BEGIN_1(a)

  /* ----------------------------------------------- */

  SSE_LOADL_PS(SSE_ARG_1,FLOAT_0,XMM0)
  SSE_LOADH_PS(SSE_ARG_1,FLOAT_4,XMM0)

  SSE_LOADL_PS(SSE_ARG_1,FLOAT_8,XMM5)
  SSE_LOADH_PS(SSE_ARG_1,FLOAT_12,XMM5)

  SSE_COPY_PS(XMM3,XMM0)
  SSE_SHUFFLE(XMM3,XMM5,0x88)

  SSE_SHUFFLE(XMM5,XMM0,0xDD)

  SSE_LOADL_PS(SSE_ARG_1,FLOAT_2,XMM0)
  SSE_LOADH_PS(SSE_ARG_1,FLOAT_6,XMM0)

  SSE_LOADL_PS(SSE_ARG_1,FLOAT_10,XMM6)
  SSE_LOADH_PS(SSE_ARG_1,FLOAT_14,XMM6)

  SSE_COPY_PS(XMM4,XMM0)
  SSE_SHUFFLE(XMM4,XMM6,0x88)

  SSE_SHUFFLE(XMM6,XMM0,0xDD)

  /* ----------------------------------------------- */
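  /* the loads and shuffles above gather the block into XMM3, XMM5, XMM4 and XMM6
     (effectively a 4x4 transpose, as in Intel's original code); the blocks below accumulate
     the 2x2 sub-determinants and cofactors of that transpose, which form the adjugate of A */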
  SSE_COPY_PS(XMM7,XMM4)
  SSE_MULT_PS(XMM7,XMM6)

  SSE_SHUFFLE(XMM7,XMM7,0xB1)

  SSE_COPY_PS(XMM0,XMM5)
  SSE_MULT_PS(XMM0,XMM7)

  SSE_COPY_PS(XMM2,XMM3)
  SSE_MULT_PS(XMM2,XMM7)

  SSE_SHUFFLE(XMM7,XMM7,0x4E)

  SSE_COPY_PS(XMM1,XMM5)
  SSE_MULT_PS(XMM1,XMM7)
  SSE_SUB_PS(XMM1,XMM0)

  SSE_MULT_PS(XMM7,XMM3)
  SSE_SUB_PS(XMM7,XMM2)

  SSE_SHUFFLE(XMM7,XMM7,0x4E)
  SSE_STORE_PS(SSE_ARG_1,FLOAT_4,XMM7)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM0,XMM5)
  SSE_MULT_PS(XMM0,XMM4)

  SSE_SHUFFLE(XMM0,XMM0,0xB1)

  SSE_COPY_PS(XMM2,XMM6)
  SSE_MULT_PS(XMM2,XMM0)
  SSE_ADD_PS(XMM2,XMM1)

  SSE_COPY_PS(XMM7,XMM3)
  SSE_MULT_PS(XMM7,XMM0)

  SSE_SHUFFLE(XMM0,XMM0,0x4E)

  SSE_COPY_PS(XMM1,XMM6)
  SSE_MULT_PS(XMM1,XMM0)
  SSE_SUB_PS(XMM2,XMM1)

  SSE_MULT_PS(XMM0,XMM3)
  SSE_SUB_PS(XMM0,XMM7)

  SSE_SHUFFLE(XMM0,XMM0,0x4E)
  SSE_STORE_PS(SSE_ARG_1,FLOAT_12,XMM0)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM7,XMM5)
  SSE_SHUFFLE(XMM7,XMM5,0x4E)
  SSE_MULT_PS(XMM7,XMM6)

  SSE_SHUFFLE(XMM7,XMM7,0xB1)

  SSE_SHUFFLE(XMM4,XMM4,0x4E)

  SSE_COPY_PS(XMM0,XMM4)
  SSE_MULT_PS(XMM0,XMM7)
  SSE_ADD_PS(XMM0,XMM2)

  SSE_COPY_PS(XMM2,XMM3)
  SSE_MULT_PS(XMM2,XMM7)

  SSE_SHUFFLE(XMM7,XMM7,0x4E)

  SSE_COPY_PS(XMM1,XMM4)
  SSE_MULT_PS(XMM1,XMM7)
  SSE_SUB_PS(XMM0,XMM1)
  SSE_STORE_PS(SSE_ARG_1,FLOAT_0,XMM0)

  SSE_MULT_PS(XMM7,XMM3)
  SSE_SUB_PS(XMM7,XMM2)

  SSE_SHUFFLE(XMM7,XMM7,0x4E)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM1,XMM3)
  SSE_MULT_PS(XMM1,XMM5)

  SSE_SHUFFLE(XMM1,XMM1,0xB1)

  SSE_COPY_PS(XMM0,XMM6)
  SSE_MULT_PS(XMM0,XMM1)
  SSE_ADD_PS(XMM0,XMM7)

  SSE_COPY_PS(XMM2,XMM4)
  SSE_MULT_PS(XMM2,XMM1)
  SSE_SUB_PS_M(XMM2,SSE_ARG_1,FLOAT_12)

  SSE_SHUFFLE(XMM1,XMM1,0x4E)

  SSE_COPY_PS(XMM7,XMM6)
  SSE_MULT_PS(XMM7,XMM1)
  SSE_SUB_PS(XMM7,XMM0)

  SSE_MULT_PS(XMM1,XMM4)
  SSE_SUB_PS(XMM2,XMM1)
  SSE_STORE_PS(SSE_ARG_1,FLOAT_12,XMM2)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM1,XMM3)
  SSE_MULT_PS(XMM1,XMM6)

  SSE_SHUFFLE(XMM1,XMM1,0xB1)

  SSE_COPY_PS(XMM2,XMM4)
  SSE_MULT_PS(XMM2,XMM1)
  SSE_LOAD_PS(SSE_ARG_1,FLOAT_4,XMM0)
  SSE_SUB_PS(XMM0,XMM2)

  SSE_COPY_PS(XMM2,XMM5)
  SSE_MULT_PS(XMM2,XMM1)
  SSE_ADD_PS(XMM2,XMM7)

  SSE_SHUFFLE(XMM1,XMM1,0x4E)

  SSE_COPY_PS(XMM7,XMM4)
  SSE_MULT_PS(XMM7,XMM1)
  SSE_ADD_PS(XMM7,XMM0)

  SSE_MULT_PS(XMM1,XMM5)
  SSE_SUB_PS(XMM2,XMM1)

  /* ----------------------------------------------- */

  SSE_MULT_PS(XMM4,XMM3)

  SSE_SHUFFLE(XMM4,XMM4,0xB1)

  SSE_COPY_PS(XMM1,XMM6)
  SSE_MULT_PS(XMM1,XMM4)
  SSE_ADD_PS(XMM1,XMM7)

  SSE_COPY_PS(XMM0,XMM5)
  SSE_MULT_PS(XMM0,XMM4)
  SSE_LOAD_PS(SSE_ARG_1,FLOAT_12,XMM7)
  SSE_SUB_PS(XMM7,XMM0)

  SSE_SHUFFLE(XMM4,XMM4,0x4E)

  SSE_MULT_PS(XMM6,XMM4)
  SSE_SUB_PS(XMM1,XMM6)

  SSE_MULT_PS(XMM5,XMM4)
  SSE_ADD_PS(XMM5,XMM7)

  /* ----------------------------------------------- */
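  /* the block below computes the determinant as the dot product of one row/column with the
     corresponding cofactors (Laplace expansion), takes an approximate reciprocal with
     SSE_RECIP_SS and refines it with one Newton-Raphson step (x <- 2*x - det*x*x), then
     broadcasts the result and scales the four cofactor rows to produce the inverse */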
  SSE_LOAD_PS(SSE_ARG_1,FLOAT_0,XMM0)
  SSE_MULT_PS(XMM3,XMM0)

  SSE_COPY_PS(XMM4,XMM3)
  SSE_SHUFFLE(XMM4,XMM3,0x4E)
  SSE_ADD_PS(XMM4,XMM3)

  SSE_COPY_PS(XMM6,XMM4)
  SSE_SHUFFLE(XMM6,XMM4,0xB1)
  SSE_ADD_SS(XMM6,XMM4)

  SSE_COPY_PS(XMM3,XMM6)
  SSE_RECIP_SS(XMM3,XMM6)
  SSE_COPY_SS(XMM4,XMM3)
  SSE_ADD_SS(XMM4,XMM3)
  SSE_MULT_SS(XMM3,XMM3)
  SSE_MULT_SS(XMM6,XMM3)
  SSE_SUB_SS(XMM4,XMM6)

  SSE_SHUFFLE(XMM4,XMM4,0x00)

  SSE_MULT_PS(XMM0,XMM4)
  SSE_STOREL_PS(SSE_ARG_1,FLOAT_0,XMM0)
  SSE_STOREH_PS(SSE_ARG_1,FLOAT_2,XMM0)

  SSE_MULT_PS(XMM1,XMM4)
  SSE_STOREL_PS(SSE_ARG_1,FLOAT_4,XMM1)
  SSE_STOREH_PS(SSE_ARG_1,FLOAT_6,XMM1)

  SSE_MULT_PS(XMM2,XMM4)
  SSE_STOREL_PS(SSE_ARG_1,FLOAT_8,XMM2)
  SSE_STOREH_PS(SSE_ARG_1,FLOAT_10,XMM2)

  SSE_MULT_PS(XMM4,XMM5)
  SSE_STOREL_PS(SSE_ARG_1,FLOAT_12,XMM4)
  SSE_STOREH_PS(SSE_ARG_1,FLOAT_14,XMM4)

  /* ----------------------------------------------- */

  SSE_INLINE_END_1;
  SSE_SCOPE_END;
  return 0;
}

#endif