Actual source code: mpi.h
petsc-3.11.3 2019-06-26
1: /*
2: This is a special set of bindings for uni-processor use of MPI by the PETSc library.
4: NOT ALL THE MPI CALLS ARE IMPLEMENTED CORRECTLY! Only those needed in PETSc.
6: For example,
7: * Does not implement send to self.
8: * Does not implement attributes correctly.
9: */
11: /*
12: The following info is a response to one of the petsc-maint questions
13: regarding MPIUNI.
15:      MPIUNI was developed with the aim of getting PETSc to compile and
16:      run in the absence of a full MPI implementation. With this, we
17:      were able to provide PETSc on Windows and Windows64 even before any
18:      MPI implementation was available on these platforms. [Or with
19:      certain compilers - like Borland - that do not have a usable MPI
20:      implementation.]
22:      However, providing a sequential, standards-compliant MPI
23:      implementation is *not* the goal of MPIUNI. The development strategy
24:      was to make just enough changes so that PETSc sources and examples
25:      compile without errors and run in uni-processor mode. This is
26:      why the individual functions are not documented.
28:      PETSc usage of MPIUNI is primarily from C. However, a minimal Fortran
29:      interface is also provided - to get the PETSc Fortran examples with a
30:      few MPI calls working.
32:      One of the optimizations in MPIUNI is to avoid function-call
33:      overhead when possible. Hence most of the C functions are
34:      implemented as macros. The function calls cannot, however, be
35:      avoided for Fortran usage.
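
     As an illustration (a sketch; see the macro definitions later in this
     file), a user call such as

        MPI_Allreduce(sendbuf,recvbuf,n,MPI_DOUBLE,MPI_SUM,comm);

     expands at compile time into a comma expression that assigns the unused
     arguments to MPIUNI_TMP via MPIUNI_ARG() and then copies
     n*MPI_sizeof(MPI_DOUBLE) bytes from sendbuf to recvbuf with
     MPIUNI_Memcpy() - there is no intermediate MPI_Allreduce() function.
     Routines such as MPI_Init(), MPI_Comm_size() and MPI_Comm_rank() remain
     real functions, implemented in mpi.c.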
37:      Most PETSc objects have both sequential and parallel
38:      implementations, which are separate. For example, there are two
39:      sparse matrix storage formats - SeqAIJ and MPIAIJ. Some MPI
40:      routines are used in the Seq part, but most of them are used in the
41:      MPI part. The send/receive calls can be found mostly in the MPI
42:      part.
44:      When MPIUNI is used, only the Seq versions of the PETSc objects are
45:      used, even though the MPI variants of the objects are also compiled.
46:      Since there are no send/receive calls in the Seq variants, PETSc
47:      works fine with MPIUNI in sequential mode.
49:      The reason some send/receive functions are defined to abort() is to
50:      detect sections of code that use send/receive functions and get
51:      executed in sequential mode (which should not happen in the case of
52:      PETSc).
54:      A proper implementation of send/receive would involve writing a
55:      function for each of them. Inside each of these functions, we would
56:      have to check whether the send is to self or the receive is from
57:      self, and then do the buffering accordingly (until the receive is
58:      called) - or, if a nonblocking receive is posted, do a copy, etc.
59:      Handling the buffering would be complicated enough that, at that
60:      point, a proper implementation of MPI might as well be used. This is
61:      the reason send to self is not implemented in MPIUNI, and never
62:      will be.
64:      Proper implementations of MPI [e.g. MPICH & OpenMPI] are
65:      available for most machines. When these packages are available, it is
66:      generally preferable to use one of them instead of MPIUNI - even if
67:      the user is using PETSc sequentially.
69:      - MPIUNI does not support all MPI functions [or functionality].
70:        Hence it might not work with external packages or user code that
71:        makes MPI calls.
73:      - MPIUNI is not a standards-compliant implementation for np=1.
74:        For example, if the user code has a send/recv to self, it will
75:        abort. [There are similar issues with a number of other MPI
76:        functions.] MPICH & OpenMPI, however, are correct implementations
77:        of the MPI standard for np=1.
79:      - When user code uses multiple MPI-based packages that have their
80:        own *internal* stubs equivalent to MPIUNI, these multiple
81:        implementations of MPI for np=1 invariably conflict with each
82:        other in sequential mode. The correct thing to do is to make all
83:        such packages use the *same* MPI implementation for np=1.
84:        MPICH/OpenMPI satisfy this requirement [and are hence the correct choice].
86:      - Using MPICH/OpenMPI sequentially should have minimal
87:        disadvantages. [For example, these binaries can be run without
88:        mpirun/mpiexec as ./executable, without requiring any extra
89:        configuration for ssh/rsh/daemons etc.] This should not be a
90:        reason to avoid these packages for sequential use.
92:   Instructions for building standalone MPIUNI [e.g. linux/gcc+gfortran] (a usage sketch follows these steps):
93:   - extract include/mpiuni/mpi.h, mpif.h and src/sys/mpiuni/mpi.c from PETSc
94:   - remove the reference to petscconf.h from mpi.h
95:   - gcc -c mpi.c -DPETSC_HAVE_STDLIB_H -DPETSC_HAVE_FORTRAN_UNDERSCORE
96:   - ar cr libmpiuni.a mpi.o
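
     A minimal sketch of using the resulting library (the file name, code and
     compile line are illustrative, not part of the original instructions):

        // test.c
        #include <stdio.h>
        #include "mpi.h"
        int main(int argc,char **argv)
        {
          int size,rank;
          MPI_Init(&argc,&argv);
          MPI_Comm_size(MPI_COMM_WORLD,&size);   // always 1
          MPI_Comm_rank(MPI_COMM_WORLD,&rank);   // always 0
          printf("size %d rank %d\n",size,rank);
          MPI_Finalize();
          return 0;
        }

     compiled and linked with something like
        gcc test.c -I. libmpiuni.a -o test
     [repeating any -D flags the trimmed mpi.h still needs].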
98: */
100: #if !defined(__MPIUNI_H)
101: #define __MPIUNI_H

103: /* Required by abort() in mpi.c & for win64 */
104: #include <petscconf.h>
105: #include <stddef.h>
107: /* This is reproduced from petscsys.h so that mpi.h can be used standalone without first including petscsys.h */
108: #if defined(_WIN32) && defined(PETSC_USE_SHARED_LIBRARIES)
109: # define MPIUni_ __declspec(dllexport)
110: # define MPIUni_PETSC_DLLIMPORT __declspec(dllimport)
111: #elif defined(PETSC_USE_VISIBILITY_CXX) && defined(__cplusplus)
112: # define MPIUni_ __attribute__((visibility ("default")))
113: # define MPIUni_PETSC_DLLIMPORT __attribute__((visibility ("default")))
114: #elif defined(PETSC_USE_VISIBILITY_C) && !defined(__cplusplus)
115: # define MPIUni_ __attribute__((visibility ("default")))
116: # define MPIUni_PETSC_DLLIMPORT __attribute__((visibility ("default")))
117: #else
118: # define MPIUni_
119: # define MPIUni_PETSC_DLLIMPORT
120: #endif
122: #if defined(petsc_EXPORTS)
123: # define MPIUni_PETSC_VISIBILITY_PUBLIC MPIUni_
124: #else /* Win32 users need this to import symbols from petsc.dll */
125: # define MPIUni_PETSC_VISIBILITY_PUBLIC MPIUni_PETSC_DLLIMPORT
126: #endif
128: #if defined(__cplusplus)
129: #define MPIUni_PETSC_EXTERN extern "C" MPIUni_PETSC_VISIBILITY_PUBLIC
130: #else
131: #define MPIUni_PETSC_EXTERN extern MPIUni_PETSC_VISIBILITY_PUBLIC
132: #endif
134: #if defined(__cplusplus)
135: extern "C" {
136: #endif
138: /* MPI_Aint has to be a signed integral type large enough to hold a pointer */
139: #if PETSC_SIZEOF_INT == PETSC_SIZEOF_VOID_P
140: typedef int MPI_Aint;
141: #elif PETSC_SIZEOF_LONG == PETSC_SIZEOF_VOID_P
142: typedef long MPI_Aint;
143: #else
144: typedef ptrdiff_t MPI_Aint;
145: #endif
147: /* old 32-bit MS compilers do not support long long */
148: #if defined(PETSC_SIZEOF_LONG_LONG)
149: typedef long long MPIUNI_INT64;
150: typedef unsigned long long MPIUNI_UINT64;
151: #elif defined(PETSC_HAVE___INT64)
152: typedef _int64 MPIUNI_INT64;
153: typedef unsigned _int64 MPIUNI_UINT64;
154: #else
155: #error "cannot determine MPIUNI_INT64, MPIUNI_UINT64 types"
156: #endif
158: /*
160: MPIUNI_ARG is used in the macros below only to stop various C/C++ compilers
161: from generating warning messages about unused variables while compiling PETSc.
162: */
163: MPIUni_PETSC_EXTERN void *MPIUNI_TMP;
164: #define MPIUNI_ARG(arg) (MPIUNI_TMP = (void *)(MPI_Aint) (arg))
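
/*
   For instance (illustrative): MPIUNI_ARG(comm) expands to
   (MPIUNI_TMP = (void *)(MPI_Aint)(comm)), which "uses" comm so that the
   compiler does not warn about an unused argument, while the comma
   expressions below still evaluate to their final MPI_SUCCESS or
   MPIUni_Abort() value.
*/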
166: #define MPI_IDENT 0
167: #define MPI_CONGRUENT 1
168: #define MPI_SIMILAR 2
169: #define MPI_UNEQUAL 3
171: #define MPI_BOTTOM ((void *) 0)
172: #define MPI_IN_PLACE ((void *)-1)
174: #define MPI_PROC_NULL (-1)
175: #define MPI_ANY_SOURCE (-2)
176: #define MPI_ANY_TAG (-1)
177: #define MPI_UNDEFINED (-32766)
179: #define MPI_SUCCESS 0
180: #define MPI_ERR_OTHER 17
181: #define MPI_ERR_UNKNOWN 18
182: #define MPI_ERR_INTERN 21
184: #define MPI_KEYVAL_INVALID 0
185: #define MPI_TAG_UB 0
187: #define MPI_MAX_PROCESSOR_NAME 1024
188: #define MPI_MAX_ERROR_STRING 2056
190: typedef int MPI_Comm;
191: #define MPI_COMM_NULL 0
192: #define MPI_COMM_SELF 1
193: #define MPI_COMM_WORLD 2
194: #define MPI_COMM_TYPE_SHARED 1
196: typedef int MPI_Info;
197: #define MPI_INFO_NULL 0
199: typedef struct {int MPI_SOURCE,MPI_TAG,MPI_ERROR;} MPI_Status;
200: #define MPI_STATUS_IGNORE (MPI_Status *)0
201: #define MPI_STATUSES_IGNORE (MPI_Status *)0
203: /* 32-bit packing scheme: [combiner:4 | type-index:8 | count:12 | base-bytes:8] */
204: /* Any changes here must also be reflected in mpif.h */
205: typedef int MPI_Datatype;
206: #define MPI_DATATYPE_NULL 0
207: #define MPI_PACKED 0
209: #define MPI_FLOAT (1 << 20 | 1 << 8 | (int)sizeof(float))
210: #define MPI_DOUBLE (1 << 20 | 1 << 8 | (int)sizeof(double))
211: #define MPI_LONG_DOUBLE (1 << 20 | 1 << 8 | (int)sizeof(long double))
213: #define MPI_COMPLEX (2 << 20 | 1 << 8 | 2*(int)sizeof(float))
214: #define MPI_C_COMPLEX (2 << 20 | 1 << 8 | 2*(int)sizeof(float))
215: #define MPI_C_FLOAT_COMPLEX (2 << 20 | 1 << 8 | 2*(int)sizeof(float))
216: #define MPI_DOUBLE_COMPLEX (2 << 20 | 1 << 8 | 2*(int)sizeof(double))
217: #define MPI_C_DOUBLE_COMPLEX (2 << 20 | 1 << 8 | 2*(int)sizeof(double))
219: #define MPI_CHAR (3 << 20 | 1 << 8 | (int)sizeof(char))
220: #define MPI_BYTE (3 << 20 | 1 << 8 | (int)sizeof(char))
221: #define MPI_SIGNED_CHAR (3 << 20 | 1 << 8 | (int)sizeof(signed char))
222: #define MPI_UNSIGNED_CHAR (3 << 20 | 1 << 8 | (int)sizeof(unsigned char))
224: #define MPI_SHORT (4 << 20 | 1 << 8 | (int)sizeof(short))
225: #define MPI_INT (4 << 20 | 1 << 8 | (int)sizeof(int))
226: #define MPI_LONG (4 << 20 | 1 << 8 | (int)sizeof(long))
227: #define MPI_LONG_LONG (4 << 20 | 1 << 8 | (int)sizeof(MPIUNI_INT64))
228: #define MPI_LONG_LONG_INT MPI_LONG_LONG
229: #define MPI_INTEGER8 MPI_LONG_LONG
231: #define MPI_UNSIGNED_SHORT (5 << 20 | 1 << 8 | (int)sizeof(unsigned short))
232: #define MPI_UNSIGNED (5 << 20 | 1 << 8 | (int)sizeof(unsigned))
233: #define MPI_UNSIGNED_LONG (5 << 20 | 1 << 8 | (int)sizeof(unsigned long))
234: #define MPI_UNSIGNED_LONG_LONG (5 << 20 | 1 << 8 | (int)sizeof(MPIUNI_UINT64))
236: #define MPI_FLOAT_INT (10 << 20 | 1 << 8 | (int)(sizeof(float) + sizeof(int)))
237: #define MPI_DOUBLE_INT (11 << 20 | 1 << 8 | (int)(sizeof(double) + sizeof(int)))
238: #define MPI_LONG_INT (12 << 20 | 1 << 8 | (int)(sizeof(long) + sizeof(int)))
239: #define MPI_SHORT_INT (13 << 20 | 1 << 8 | (int)(sizeof(short) + sizeof(int)))
240: #define MPI_2INT (14 << 20 | 1 << 8 | (int)(2*sizeof(int)))
241: #define MPI_2DOUBLE (15 << 20 | 1 << 8 | (int)(2*sizeof(double)))
243: /* Fortran datatypes; Jed Brown says they should be defined here */
244: #define MPI_INTEGER MPI_INT
245: #define MPI_DOUBLE_PRECISION MPI_DOUBLE
246: #define MPI_COMPLEX16 MPI_C_DOUBLE_COMPLEX
247: #define MPI_2DOUBLE_PRECISION MPI_2DOUBLE
249: #define MPI_ORDER_C 0
250: #define MPI_ORDER_FORTRAN 1
252: #define MPI_sizeof_default(datatype) ((((datatype) >> 8) & 0xfff) * ((datatype) & 0xff))
253: #if defined(PETSC_USE_REAL___FP16)
254: MPIUni_PETSC_EXTERN MPI_Datatype MPIU___FP16;
255: #define MPI_sizeof(datatype) ((datatype == MPIU___FP16) ? (int)(2*sizeof(char)) : MPI_sizeof_default(datatype))
256: #elif defined(PETSC_USE_REAL___FLOAT128)
257: MPIUni_PETSC_EXTERN MPI_Datatype MPIU___FLOAT128;
258: #define MPI_sizeof(datatype) ((datatype == MPIU___FLOAT128) ? (int)(2*sizeof(double)) : MPI_sizeof_default(datatype))
259: #else
260: #define MPI_sizeof(datatype) (MPI_sizeof_default(datatype))
261: #endif
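
/*
   Example of the encoding above (illustrative): MPI_DOUBLE is
   (1 << 20 | 1 << 8 | sizeof(double)), i.e. type-index 1, count 1 and
   base-bytes sizeof(double), so MPI_sizeof(MPI_DOUBLE) evaluates to
   1*sizeof(double).  Likewise MPI_sizeof(MPI_DOUBLE_INT) evaluates to
   sizeof(double)+sizeof(int), since the pair size is stored in the
   base-bytes field with a count of 1.
*/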
263: MPIUni_PETSC_EXTERN int MPIUNI_Memcpy(void*,const void*,int);
265: typedef int MPI_Request;
266: #define MPI_REQUEST_NULL 0
268: typedef int MPI_Group;
269: #define MPI_GROUP_NULL 0
270: #define MPI_GROUP_EMPTY 0
272: typedef int MPI_Op;
273: #define MPI_OP_NULL 0
274: #define MPI_SUM 1
275: #define MPI_MAX 2
276: #define MPI_MIN 3
277: #define MPI_REPLACE 4
278: #define MPI_PROD 5
279: #define MPI_LAND 6
280: #define MPI_BAND 7
281: #define MPI_LOR 8
282: #define MPI_BOR 9
283: #define MPI_LXOR 10
284: #define MPI_BXOR 11
285: #define MPI_MAXLOC 12
286: #define MPI_MINLOC 13
288: typedef void (MPI_User_function)(void*, void *, int *, MPI_Datatype *);
290: typedef int MPI_Errhandler;
291: #define MPI_ERRHANDLER_NULL 0
292: #define MPI_ERRORS_RETURN 0
293: #define MPI_ERRORS_ARE_FATAL 0
294: typedef void (MPI_Handler_function)(MPI_Comm *, int *, ...);
296: /*
297: Prototypes of some functions which are implemented in mpi.c
298: */
299: typedef int (MPI_Copy_function)(MPI_Comm,int,void *,void *,void *,int *);
300: typedef int (MPI_Delete_function)(MPI_Comm,int,void *,void *);
301: #define MPI_NULL_COPY_FN (MPI_Copy_function*)0
302: #define MPI_NULL_DELETE_FN (MPI_Delete_function*)0
304: /*
305:   To enable linking PETSc+MPIUNI with any other package that might have its
306:   own MPIUNI (equivalent implementation), we need to avoid using the 'MPI'
307:   namespace for MPIUNI functions that go into the PETSc library.
309:   For the C functions below (which get compiled into the PETSc library) we
310:   map the 'MPI' functions into the 'Petsc_MPI' namespace.
312:   With Fortran we use a similar mapping - thus requiring the use of the
313:   C preprocessor with mpif.h.
314: */
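
/*
   For example (illustrative): with the defines below, a user call
      MPI_Comm_rank(comm,&rank);
   compiled against this header actually calls Petsc_MPI_Comm_rank(), so the
   symbols placed in the PETSc library cannot clash with those of a real MPI
   implementation or with another package's internal np=1 stubs.
*/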
315: #define MPI_Abort Petsc_MPI_Abort
316: #define MPIUni_Abort Petsc_MPIUni_Abort
317: #define MPI_Attr_get Petsc_MPI_Attr_get
318: #define MPI_Keyval_free Petsc_MPI_Keyval_free
319: #define MPI_Attr_put Petsc_MPI_Attr_put
320: #define MPI_Attr_delete Petsc_MPI_Attr_delete
321: #define MPI_Keyval_create Petsc_MPI_Keyval_create
322: #define MPI_Comm_free Petsc_MPI_Comm_free
323: #define MPI_Comm_dup Petsc_MPI_Comm_dup
324: #define MPI_Comm_create Petsc_MPI_Comm_create
325: #define MPI_Init Petsc_MPI_Init
326: #define MPI_Finalize Petsc_MPI_Finalize
327: #define MPI_Initialized Petsc_MPI_Initialized
328: #define MPI_Finalized Petsc_MPI_Finalized
329: #define MPI_Comm_size Petsc_MPI_Comm_size
330: #define MPI_Comm_rank Petsc_MPI_Comm_rank
331: #define MPI_Wtime Petsc_MPI_Wtime
332: #define MPI_Type_get_envelope Petsc_MPI_Type_get_envelope
333: #define MPI_Type_get_contents Petsc_MPI_Type_get_contents
334: #define MPI_Add_error_class Petsc_MPI_Add_error_class
335: #define MPI_Add_error_code Petsc_MPI_Add_error_code
337: /* identical C bindings */
338: #define MPI_Comm_copy_attr_function MPI_Copy_function
339: #define MPI_Comm_delete_attr_function MPI_Delete_function
340: #define MPI_COMM_NULL_COPY_FN MPI_NULL_COPY_FN
341: #define MPI_COMM_NULL_DELETE_FN MPI_NULL_DELETE_FN
342: #define MPI_Comm_create_keyval Petsc_MPI_Keyval_create
343: #define MPI_Comm_free_keyval Petsc_MPI_Keyval_free
344: #define MPI_Comm_get_attr Petsc_MPI_Attr_get
345: #define MPI_Comm_set_attr Petsc_MPI_Attr_put
346: #define MPI_Comm_delete_attr Petsc_MPI_Attr_delete
348: MPIUni_PETSC_EXTERN int MPIUni_Abort(MPI_Comm,int);
349: MPIUni_PETSC_EXTERN int MPI_Abort(MPI_Comm,int);
350: MPIUni_PETSC_EXTERN int MPI_Attr_get(MPI_Comm comm,int keyval,void *attribute_val,int *flag);
351: MPIUni_PETSC_EXTERN int MPI_Keyval_free(int*);
352: MPIUni_PETSC_EXTERN int MPI_Attr_put(MPI_Comm,int,void *);
353: MPIUni_PETSC_EXTERN int MPI_Attr_delete(MPI_Comm,int);
354: MPIUni_PETSC_EXTERN int MPI_Keyval_create(MPI_Copy_function *,MPI_Delete_function *,int *,void *);
355: MPIUni_PETSC_EXTERN int MPI_Comm_free(MPI_Comm*);
356: MPIUni_PETSC_EXTERN int MPI_Comm_dup(MPI_Comm,MPI_Comm *);
357: MPIUni_PETSC_EXTERN int MPI_Comm_create(MPI_Comm,MPI_Group,MPI_Comm *);
358: MPIUni_PETSC_EXTERN int MPI_Init(int *, char ***);
359: MPIUni_PETSC_EXTERN int MPI_Finalize(void);
360: MPIUni_PETSC_EXTERN int MPI_Initialized(int*);
361: MPIUni_PETSC_EXTERN int MPI_Finalized(int*);
362: MPIUni_PETSC_EXTERN int MPI_Comm_size(MPI_Comm,int*);
363: MPIUni_PETSC_EXTERN int MPI_Comm_rank(MPI_Comm,int*);
364: MPIUni_PETSC_EXTERN double MPI_Wtime(void);
366: MPIUni_PETSC_EXTERN int MPI_Type_get_envelope(MPI_Datatype,int*,int*,int*,int*);
367: MPIUni_PETSC_EXTERN int MPI_Type_get_contents(MPI_Datatype,int,int,int,int*,MPI_Aint*,MPI_Datatype*);
368: MPIUni_PETSC_EXTERN int MPI_Add_error_class(int*);
369: MPIUni_PETSC_EXTERN int MPI_Add_error_code(int,int*);
371: /*
372:   Routines we have replaced with macros that do nothing;
373:   some return error codes, others return success.
374: */
376: typedef int MPI_Fint;
377: #define MPI_Comm_f2c(comm) (MPI_Comm)(comm)
378: #define MPI_Comm_c2f(comm) (MPI_Fint)(comm)
379: #define MPI_Type_f2c(type) (MPI_Datatype)(type)
380: #define MPI_Type_c2f(type) (MPI_Fint)(type)
381: #define MPI_Op_f2c(op) (MPI_Op)(op)
382: #define MPI_Op_c2f(op) (MPI_Fint)(op)
384: #define MPI_Send(buf,count,datatype,dest,tag,comm) \
385: (MPIUNI_ARG(buf),\
386: MPIUNI_ARG(count),\
387: MPIUNI_ARG(datatype),\
388: MPIUNI_ARG(dest),\
389: MPIUNI_ARG(tag),\
390: MPIUNI_ARG(comm),\
391: MPIUni_Abort(MPI_COMM_WORLD,0))
392: #define MPI_Recv(buf,count,datatype,source,tag,comm,status) \
393: (MPIUNI_ARG(buf),\
394: MPIUNI_ARG(count),\
395: MPIUNI_ARG(datatype),\
396: MPIUNI_ARG(source),\
397: MPIUNI_ARG(tag),\
398: MPIUNI_ARG(comm),\
399: MPIUNI_ARG(status),\
400: MPIUni_Abort(MPI_COMM_WORLD,0))
401: #define MPI_Get_count(status,datatype,count) \
402: (MPIUNI_ARG(status),\
403: MPIUNI_ARG(datatype),\
404: MPIUNI_ARG(count),\
405: MPIUni_Abort(MPI_COMM_WORLD,0))
406: #define MPI_Bsend(buf,count,datatype,dest,tag,comm) \
407: (MPIUNI_ARG(buf),\
408: MPIUNI_ARG(count),\
409: MPIUNI_ARG(datatype),\
410: MPIUNI_ARG(dest),\
411: MPIUNI_ARG(tag),\
412: MPIUNI_ARG(comm),\
413: MPIUni_Abort(MPI_COMM_WORLD,0))
414: #define MPI_Ssend(buf,count,datatype,dest,tag,comm) \
415: (MPIUNI_ARG(buf),\
416: MPIUNI_ARG(count),\
417: MPIUNI_ARG(datatype),\
418: MPIUNI_ARG(dest),\
419: MPIUNI_ARG(tag),\
420: MPIUNI_ARG(comm),\
421: MPIUni_Abort(MPI_COMM_WORLD,0))
422: #define MPI_Rsend(buf,count,datatype,dest,tag,comm) \
423: (MPIUNI_ARG(buf),\
424: MPIUNI_ARG(count),\
425: MPIUNI_ARG(datatype),\
426: MPIUNI_ARG(dest),\
427: MPIUNI_ARG(tag),\
428: MPIUNI_ARG(comm),\
429: MPIUni_Abort(MPI_COMM_WORLD,0))
430: #define MPI_Buffer_attach(buffer,size) \
431: (MPIUNI_ARG(buffer),\
432: MPIUNI_ARG(size),\
433: MPI_SUCCESS)
434: #define MPI_Buffer_detach(buffer,size)\
435: (MPIUNI_ARG(buffer),\
436: MPIUNI_ARG(size),\
437: MPI_SUCCESS)
438: #define MPI_Ibsend(buf,count,datatype,dest,tag,comm,request) \
439: (MPIUNI_ARG(buf),\
440: MPIUNI_ARG(count),\
441: MPIUNI_ARG(datatype),\
442: MPIUNI_ARG(dest),\
443: MPIUNI_ARG(tag),\
444: MPIUNI_ARG(comm),\
445: MPIUNI_ARG(request),\
446: MPIUni_Abort(MPI_COMM_WORLD,0))
447: #define MPI_Issend(buf,count,datatype,dest,tag,comm,request) \
448: (MPIUNI_ARG(buf),\
449: MPIUNI_ARG(count),\
450: MPIUNI_ARG(datatype),\
451: MPIUNI_ARG(dest),\
452: MPIUNI_ARG(tag),\
453: MPIUNI_ARG(comm),\
454: MPIUNI_ARG(request),\
455: MPIUni_Abort(MPI_COMM_WORLD,0))
456: #define MPI_Irsend(buf,count,datatype,dest,tag,comm,request) \
457: (MPIUNI_ARG(buf),\
458: MPIUNI_ARG(count),\
459: MPIUNI_ARG(datatype),\
460: MPIUNI_ARG(dest),\
461: MPIUNI_ARG(tag),\
462: MPIUNI_ARG(comm),\
463: MPIUNI_ARG(request),\
464: MPIUni_Abort(MPI_COMM_WORLD,0))
465: #define MPI_Irecv(buf,count,datatype,source,tag,comm,request) \
466: (MPIUNI_ARG(buf),\
467: MPIUNI_ARG(count),\
468: MPIUNI_ARG(datatype),\
469: MPIUNI_ARG(source),\
470: MPIUNI_ARG(tag),\
471: MPIUNI_ARG(comm),\
472: MPIUNI_ARG(request),\
473: MPIUni_Abort(MPI_COMM_WORLD,0))
474: #define MPI_Isend(buf,count,datatype,dest,tag,comm,request) \
475: (MPIUNI_ARG(buf),\
476: MPIUNI_ARG(count),\
477: MPIUNI_ARG(datatype),\
478: MPIUNI_ARG(dest),\
479: MPIUNI_ARG(tag),\
480: MPIUNI_ARG(comm),\
481: MPIUNI_ARG(request),\
482: MPIUni_Abort(MPI_COMM_WORLD,0))
483: #define MPI_Wait(request,status) \
484: (MPIUNI_ARG(request),\
485: MPIUNI_ARG(status),\
486: MPI_SUCCESS)
487: #define MPI_Test(request,flag,status) \
488: (MPIUNI_ARG(request),\
489: MPIUNI_ARG(status),\
490: *(flag) = 0,\
491: MPI_SUCCESS)
492: #define MPI_Request_free(request) \
493: (MPIUNI_ARG(request),\
494: MPI_SUCCESS)
495: #define MPI_Waitany(count,array_of_requests,index,status) \
496: (MPIUNI_ARG(count),\
497: MPIUNI_ARG(array_of_requests),\
498: MPIUNI_ARG(status),\
499: (*(status)).MPI_SOURCE = 0, \
500: *(index) = 0,\
501: MPI_SUCCESS)
502: #define MPI_Testany(a,b,c,d,e) \
503: (MPIUNI_ARG(a),\
504: MPIUNI_ARG(b),\
505: MPIUNI_ARG(c),\
506: MPIUNI_ARG(d),\
507: MPIUNI_ARG(e),\
508: MPI_SUCCESS)
509: #define MPI_Waitall(count,array_of_requests,array_of_statuses) \
510: (MPIUNI_ARG(count),\
511: MPIUNI_ARG(array_of_requests),\
512: MPIUNI_ARG(array_of_statuses),\
513: MPI_SUCCESS)
514: #define MPI_Testall(count,array_of_requests,flag,array_of_statuses) \
515: (MPIUNI_ARG(count),\
516: MPIUNI_ARG(array_of_requests),\
517: MPIUNI_ARG(flag),\
518: MPIUNI_ARG(array_of_statuses),\
519: MPI_SUCCESS)
520: #define MPI_Waitsome(incount,array_of_requests,outcount,\
521: array_of_indices,array_of_statuses) \
522: (MPIUNI_ARG(incount),\
523: MPIUNI_ARG(array_of_requests),\
524: MPIUNI_ARG(outcount),\
525: MPIUNI_ARG(array_of_indices),\
526: MPIUNI_ARG(array_of_statuses),\
527: MPI_SUCCESS)
528: #define MPI_Comm_group(comm,group) \
529: (MPIUNI_ARG(comm),\
530: *group = 1,\
531: MPI_SUCCESS)
532: #define MPI_Group_incl(group,n,ranks,newgroup) \
533: (MPIUNI_ARG(group),\
534: MPIUNI_ARG(n),\
535: MPIUNI_ARG(ranks),\
536: MPIUNI_ARG(newgroup),\
537: MPI_SUCCESS)
538: #define MPI_Testsome(incount,array_of_requests,outcount,\
539: array_of_indices,array_of_statuses) \
540: (MPIUNI_ARG(incount),\
541: MPIUNI_ARG(array_of_requests),\
542: MPIUNI_ARG(outcount),\
543: MPIUNI_ARG(array_of_indices),\
544: MPIUNI_ARG(array_of_statuses),\
545: MPI_SUCCESS)
546: #define MPI_Iprobe(source,tag,comm,flag,status) \
547: (MPIUNI_ARG(source),\
548: MPIUNI_ARG(tag),\
549: MPIUNI_ARG(comm),\
550: *(flag)=0,\
551: MPIUNI_ARG(status),\
552: MPI_SUCCESS)
553: #define MPI_Probe(source,tag,comm,status) \
554: (MPIUNI_ARG(source),\
555: MPIUNI_ARG(tag),\
556: MPIUNI_ARG(comm),\
557: MPIUNI_ARG(status),\
558: MPI_SUCCESS)
559: #define MPI_Cancel(request) \
560: (MPIUNI_ARG(request),\
561: MPI_SUCCESS)
562: #define MPI_Test_cancelled(status,flag) \
563: (MPIUNI_ARG(status),\
564: *(flag)=0,\
565: MPI_SUCCESS)
566: #define MPI_Send_init(buf,count,datatype,dest,tag,comm,request) \
567: (MPIUNI_ARG(buf),\
568: MPIUNI_ARG(count),\
569: MPIUNI_ARG(datatype),\
570: MPIUNI_ARG(dest),\
571: MPIUNI_ARG(tag),\
572: MPIUNI_ARG(comm),\
573: MPIUNI_ARG(request),\
574: MPI_SUCCESS)
575: #define MPI_Bsend_init(buf,count,datatype,dest,tag,comm,request) \
576: (MPIUNI_ARG(buf),\
577: MPIUNI_ARG(count),\
578: MPIUNI_ARG(datatype),\
579: MPIUNI_ARG(dest),\
580: MPIUNI_ARG(tag),\
581: MPIUNI_ARG(comm),\
582: MPIUNI_ARG(request),\
583: MPI_SUCCESS)
584: #define MPI_Ssend_init(buf,count,datatype,dest,tag,comm,request) \
585: (MPIUNI_ARG(buf),\
586: MPIUNI_ARG(count),\
587: MPIUNI_ARG(datatype),\
588: MPIUNI_ARG(dest),\
589: MPIUNI_ARG(tag),\
590: MPIUNI_ARG(comm),\
591: MPIUNI_ARG(request),\
592: MPI_SUCCESS)
602: #define MPI_Rsend_init(buf,count,datatype,dest,tag,comm,request) \
603: (MPIUNI_ARG(buf),\
604: MPIUNI_ARG(count),\
605: MPIUNI_ARG(datatype),\
606: MPIUNI_ARG(dest),\
607: MPIUNI_ARG(tag),\
608: MPIUNI_ARG(comm),\
609: MPIUNI_ARG(request),\
610: MPI_SUCCESS)
611: #define MPI_Recv_init(buf,count,datatype,source,tag,comm,request) \
612: (MPIUNI_ARG(buf),\
613: MPIUNI_ARG(count),\
614: MPIUNI_ARG(datatype),\
615: MPIUNI_ARG(source),\
616: MPIUNI_ARG(tag),\
617: MPIUNI_ARG(comm),\
618: MPIUNI_ARG(request),\
619: MPI_SUCCESS)
620: #define MPI_Start(request) \
621: (MPIUNI_ARG(request),\
622: MPI_SUCCESS)
623: #define MPI_Startall(count,array_of_requests) \
624: (MPIUNI_ARG(count),\
625: MPIUNI_ARG(array_of_requests),\
626: MPI_SUCCESS)
627: #define MPI_Sendrecv(sendbuf,sendcount,sendtype,\
628: dest,sendtag,recvbuf,recvcount,\
629: recvtype,source,recvtag,\
630: comm,status) \
631: (MPIUNI_ARG(dest),\
632: MPIUNI_ARG(sendtag),\
633: MPIUNI_ARG(recvcount),\
634: MPIUNI_ARG(recvtype),\
635: MPIUNI_ARG(source),\
636: MPIUNI_ARG(recvtag),\
637: MPIUNI_ARG(comm),\
638: MPIUNI_ARG(status),\
639: MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)))
640: #define MPI_Sendrecv_replace(buf,count,datatype,dest,sendtag,\
641: source,recvtag,comm,status) \
642: (MPIUNI_ARG(buf),\
643: MPIUNI_ARG(count),\
644: MPIUNI_ARG(datatype),\
645: MPIUNI_ARG(dest),\
646: MPIUNI_ARG(sendtag),\
647: MPIUNI_ARG(source),\
648: MPIUNI_ARG(recvtag),\
649: MPIUNI_ARG(comm),\
650: MPIUNI_ARG(status),\
651: MPI_SUCCESS)
653: #define MPI_COMBINER_NAMED 0
654: #define MPI_COMBINER_DUP 1
655: #define MPI_COMBINER_CONTIGUOUS 2
656: /* 32-bit packing scheme: [combiner:4 | type-index:8 | count:12 | base-bytes:8] */
657: #define MPI_Type_dup(oldtype,newtype) \
658: (*(newtype) = oldtype, MPI_SUCCESS)
659: #define MPI_Type_contiguous(count,oldtype,newtype) \
660: (*(newtype) = (MPI_COMBINER_CONTIGUOUS<<28)|((oldtype)&0x0ff00000)|(((oldtype)>>8&0xfff)*(count))<<8|((oldtype)&0xff), MPI_SUCCESS)
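/*
   Worked example (illustrative): MPI_Type_contiguous(3,MPI_DOUBLE,&newtype)
   sets newtype to a handle with MPI_COMBINER_CONTIGUOUS in the top 4 bits,
   the type-index bits copied from MPI_DOUBLE, 3*1 = 3 in the count field and
   sizeof(double) in the base-bytes field, so that MPI_Type_size(newtype,&size)
   yields size == 3*sizeof(double).
*/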
661: #define MPI_Type_vector(count,blocklength,stride,oldtype,newtype) \
662: (MPIUNI_ARG(count),\
663: MPIUNI_ARG(blocklength),\
664: MPIUNI_ARG(stride),\
665: MPIUNI_ARG(oldtype),\
666: MPIUNI_ARG(newtype),\
667: MPIUni_Abort(MPI_COMM_WORLD,0))
668: #define MPI_Type_hvector(count,blocklength,stride,oldtype,newtype) \
669: (MPIUNI_ARG(count),\
670: MPIUNI_ARG(blocklength),\
671: MPIUNI_ARG(stride),\
672: MPIUNI_ARG(oldtype),\
673: MPIUNI_ARG(newtype),\
674: MPIUni_Abort(MPI_COMM_WORLD,0))
675: #define MPI_Type_indexed(count,array_of_blocklengths,array_of_displacements,oldtype,newtype) \
676: (MPIUNI_ARG(count),\
677: MPIUNI_ARG(array_of_blocklengths),\
678: MPIUNI_ARG(array_of_displacements),\
679: MPIUNI_ARG(oldtype),\
680: MPIUNI_ARG(newtype),\
681: MPIUni_Abort(MPI_COMM_WORLD,0))
682: #define MPI_Type_hindexed(count,array_of_blocklengths,array_of_displacements,oldtype,newtype) \
683: (MPIUNI_ARG(count),\
684: MPIUNI_ARG(array_of_blocklengths),\
685: MPIUNI_ARG(array_of_displacements),\
686: MPIUNI_ARG(oldtype),\
687: MPIUNI_ARG(newtype),\
688: MPIUni_Abort(MPI_COMM_WORLD,0))
689: #define MPI_Type_struct(count,array_of_blocklengths,array_of_displacements,array_of_types,newtype) \
690: (MPIUNI_ARG(count),\
691: MPIUNI_ARG(array_of_blocklengths),\
692: MPIUNI_ARG(array_of_displacements),\
693: MPIUNI_ARG(array_of_types),\
694: MPIUNI_ARG(newtype),\
695: MPIUni_Abort(MPI_COMM_WORLD,0))
696: #define MPI_Address(location,address) \
697: (*(address) = (MPI_Aint)((char *)(location)), MPI_SUCCESS)
698: #define MPI_Type_size(datatype,size) (*(size) = MPI_sizeof((datatype)), MPI_SUCCESS)
699: #define MPI_Type_lb(datatype,lb) (MPIUNI_ARG(datatype), *(lb) = 0, MPI_SUCCESS)
700: #define MPI_Type_ub(datatype,ub) (*(ub) = MPI_sizeof((datatype)), MPI_SUCCESS)
701: #define MPI_Type_extent(datatype,extent) \
702: (*(extent) = MPI_sizeof((datatype)), MPI_SUCCESS)
703: #define MPI_Type_get_extent(datatype,lb,extent) \
704: (*(lb) = 0, *(extent) = MPI_sizeof((datatype)), MPI_SUCCESS)
705: #define MPI_Type_commit(datatype) (MPIUNI_ARG(datatype), MPI_SUCCESS)
706: #define MPI_Type_free(datatype) (*(datatype) = MPI_DATATYPE_NULL, MPI_SUCCESS)
707: #define MPI_Get_elements(status,datatype,count) \
708: (MPIUNI_ARG(status),\
709: MPIUNI_ARG(datatype),\
710: MPIUNI_ARG(count),\
711: MPIUni_Abort(MPI_COMM_WORLD,0))
712: #define MPI_Pack(inbuf,incount,datatype,outbuf,outsize,position,comm) \
713: (MPIUNI_ARG(inbuf),\
714: MPIUNI_ARG(incount),\
715: MPIUNI_ARG(datatype),\
716: MPIUNI_ARG(outbuf),\
717: MPIUNI_ARG(outsize),\
718: MPIUNI_ARG(position),\
719: MPIUNI_ARG(comm),\
720: MPIUni_Abort(MPI_COMM_WORLD,0))
721: #define MPI_Unpack(inbuf,insize,position,outbuf,outcount,datatype,comm) \
722: (MPIUNI_ARG(inbuf),\
723: MPIUNI_ARG(insize),\
724: MPIUNI_ARG(position),\
725: MPIUNI_ARG(outbuf),\
726: MPIUNI_ARG(outcount),\
727: MPIUNI_ARG(datatype),\
728: MPIUNI_ARG(comm),\
729: MPIUni_Abort(MPI_COMM_WORLD,0))
730: #define MPI_Pack_size(incount,datatype,comm,size) \
731: (MPIUNI_ARG(incount),\
732: MPIUNI_ARG(datatype),\
733: MPIUNI_ARG(comm),\
734: MPIUNI_ARG(size),\
735: MPIUni_Abort(MPI_COMM_WORLD,0))
736: #define MPI_Barrier(comm) \
737: (MPIUNI_ARG(comm),\
738: MPI_SUCCESS)
739: #define MPI_Bcast(buffer,count,datatype,root,comm) \
740: (MPIUNI_ARG(buffer),\
741: MPIUNI_ARG(count),\
742: MPIUNI_ARG(datatype),\
743: MPIUNI_ARG(root),\
744: MPIUNI_ARG(comm),\
745: MPI_SUCCESS)
746: #define MPI_Gather(sendbuf,sendcount,sendtype,\
747: recvbuf,recvcount, recvtype,\
748: root,comm) \
749: (MPIUNI_ARG(recvcount),\
750: MPIUNI_ARG(root),\
751: MPIUNI_ARG(recvtype),\
752: MPIUNI_ARG(comm),\
753: MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)))
754: #define MPI_Gatherv(sendbuf,sendcount,sendtype,\
755: recvbuf,recvcounts,displs,\
756: recvtype,root,comm) \
757: (MPIUNI_ARG(recvcounts),\
758: MPIUNI_ARG(displs),\
759: MPIUNI_ARG(recvtype),\
760: MPIUNI_ARG(root),\
761: MPIUNI_ARG(comm),\
762: MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)))
763: #define MPI_Scatter(sendbuf,sendcount,sendtype,\
764: recvbuf,recvcount,recvtype,\
765: root,comm) \
766: (MPIUNI_ARG(sendcount),\
767: MPIUNI_ARG(sendtype),\
768: MPIUNI_ARG(recvbuf),\
769: MPIUNI_ARG(recvtype),\
770: MPIUNI_ARG(root),\
771: MPIUNI_ARG(comm),\
772: MPIUNI_Memcpy(recvbuf,sendbuf,(recvcount)*MPI_sizeof(recvtype)))
773: #define MPI_Scatterv(sendbuf,sendcounts,displs,\
774: sendtype,recvbuf,recvcount,\
775: recvtype,root,comm) \
776: (MPIUNI_ARG(displs),\
777: MPIUNI_ARG(sendtype),\
778: MPIUNI_ARG(sendcounts),\
779: MPIUNI_ARG(root),\
780: MPIUNI_ARG(comm),\
781: MPIUNI_Memcpy(recvbuf,sendbuf,(recvcount)*MPI_sizeof(recvtype)))
782: #define MPI_Allgather(sendbuf,sendcount,sendtype,\
783: recvbuf,recvcount,recvtype,comm) \
784: (MPIUNI_ARG(recvcount),\
785: MPIUNI_ARG(recvtype),\
786: MPIUNI_ARG(comm),\
787: MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)))
788: #define MPI_Allgatherv(sendbuf,sendcount,sendtype,\
789: recvbuf,recvcounts,displs,recvtype,comm) \
790: (MPIUNI_ARG(recvcounts),\
791: MPIUNI_ARG(displs),\
792: MPIUNI_ARG(recvtype),\
793: MPIUNI_ARG(comm),\
794: MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)))
795: #define MPI_Alltoall(sendbuf,sendcount,sendtype,\
796: recvbuf,recvcount,recvtype,comm) \
797: (MPIUNI_ARG(recvcount),\
798: MPIUNI_ARG(recvtype),\
799: MPIUNI_ARG(comm),\
800: MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)))
801: #define MPI_Alltoallv(sendbuf,sendcounts,sdispls,sendtype,\
802: recvbuf,recvcounts,rdispls,recvtype,comm) \
803: (MPIUNI_ARG(sendbuf),\
804: MPIUNI_ARG(sendcounts),\
805: MPIUNI_ARG(sdispls),\
806: MPIUNI_ARG(sendtype),\
807: MPIUNI_ARG(recvbuf),\
808: MPIUNI_ARG(recvcounts),\
809: MPIUNI_ARG(rdispls),\
810: MPIUNI_ARG(recvtype),\
811: MPIUNI_ARG(comm),\
812: MPIUni_Abort(MPI_COMM_WORLD,0))
813: #define MPI_Alltoallw(sendbuf,sendcounts,sdispls,sendtypes,\
814: recvbuf,recvcounts,rdispls,recvtypes,comm) \
815: (MPIUNI_ARG(sendbuf),\
816: MPIUNI_ARG(sendcounts),\
817: MPIUNI_ARG(sdispls),\
818: MPIUNI_ARG(sendtypes),\
819: MPIUNI_ARG(recvbuf),\
820:      MPIUNI_ARG(recvcounts),\
821: MPIUNI_ARG(rdispls),\
822: MPIUNI_ARG(recvtypes),\
823: MPIUNI_ARG(comm),\
824: MPIUni_Abort(MPI_COMM_WORLD,0))
825: #define MPI_Reduce(sendbuf,recvbuf,count,datatype,op,root,comm) \
826: (MPIUNI_ARG(op),\
827: MPIUNI_ARG(root),\
828: MPIUNI_ARG(comm),\
829: MPIUNI_Memcpy(recvbuf,sendbuf,(count)*MPI_sizeof(datatype)))
830: #define MPI_Allreduce(sendbuf, recvbuf,count,datatype,op,comm) \
831: (MPIUNI_ARG(op),\
832: MPIUNI_ARG(comm),\
833: MPIUNI_Memcpy(recvbuf,sendbuf,(count)*MPI_sizeof(datatype)))
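
/*
   In uni-processor mode MPI_Reduce() and MPI_Allreduce() above degenerate to
   a buffer copy.  A small usage sketch (variable names are illustrative):

     double local = 3.0,global = 0.0;
     MPI_Allreduce(&local,&global,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
     // global is now 3.0: with a single process the "sum" is just the local
     // value, so the macro only copies 1*MPI_sizeof(MPI_DOUBLE) bytes.
*/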
834: #define MPI_Scan(sendbuf, recvbuf,count,datatype,op,comm) \
835: (MPIUNI_ARG(op),\
836: MPIUNI_ARG(comm),\
837: MPIUNI_Memcpy(recvbuf,sendbuf,(count)*MPI_sizeof(datatype)))
838: #define MPI_Exscan(sendbuf,recvbuf,count,datatype,op,comm) \
839: (MPIUNI_ARG(sendbuf),\
840: MPIUNI_ARG(recvbuf),\
841: MPIUNI_ARG(count),\
842: MPIUNI_ARG(datatype),\
843: MPIUNI_ARG(op),\
844: MPIUNI_ARG(comm),\
845: MPI_SUCCESS)
846: #define MPI_Reduce_scatter(sendbuf,recvbuf,recvcounts,datatype,op,comm) \
847: (MPIUNI_ARG(sendbuf),\
848: MPIUNI_ARG(recvbuf),\
849: MPIUNI_ARG(recvcounts),\
850: MPIUNI_ARG(datatype),\
851: MPIUNI_ARG(op),\
852: MPIUNI_ARG(comm),\
853: MPIUni_Abort(MPI_COMM_WORLD,0))
855: #define MPI_Op_create(function,commute,op) \
856: (MPIUNI_ARG(function),\
857: MPIUNI_ARG(commute),\
858: MPIUNI_ARG(op),\
859: MPI_SUCCESS)
860: #define MPI_Op_free(op) \
861: (*(op) = MPI_OP_NULL, MPI_SUCCESS)
863: #define MPI_Group_size(group,size) \
864: (MPIUNI_ARG(group),\
865: *(size)=1,\
866: MPI_SUCCESS)
867: #define MPI_Group_rank(group,rank) \
868: (MPIUNI_ARG(group),\
869: *(rank)=0,\
870: MPI_SUCCESS)
871: #define MPI_Group_translate_ranks(group1,n,ranks1,group2,ranks2) \
872: (MPIUNI_ARG(group1),\
873: MPIUNI_ARG(group2),\
874: MPIUNI_Memcpy((ranks2),(ranks1),(n)*sizeof(int)))
875: #define MPI_Group_compare(group1,group2,result) \
876: (MPIUNI_ARG(group1),\
877: MPIUNI_ARG(group2),\
878: *(result)=1,\
879: MPI_SUCCESS)
880: #define MPI_Group_union(group1,group2,newgroup) MPI_SUCCESS
881: #define MPI_Group_intersection(group1,group2,newgroup) MPI_SUCCESS
882: #define MPI_Group_difference(group1,group2,newgroup) MPI_SUCCESS
883: #define MPI_Group_excl(group,n,ranks,newgroup) MPI_SUCCESS
884: #define MPI_Group_range_incl(group,n,ranges,newgroup) MPI_SUCCESS
885: #define MPI_Group_range_excl(group,n,ranges,newgroup) MPI_SUCCESS
886: #define MPI_Group_free(group) \
887: (*(group) = MPI_GROUP_NULL, MPI_SUCCESS)
889: #define MPI_Comm_compare(comm1,comm2,result) \
890: (MPIUNI_ARG(comm1),\
891: MPIUNI_ARG(comm2),\
892: *(result)=MPI_IDENT,\
893: MPI_SUCCESS)
894: #define MPI_Comm_split(comm,color,key,newcomm) \
895: (MPIUNI_ARG(color),\
896: MPIUNI_ARG(key),\
897: MPI_Comm_dup(comm,newcomm))
898: #define MPI_Comm_split_type(comm,color,key,info,newcomm) \
899: (MPIUNI_ARG(color),\
900: MPIUNI_ARG(key),\
901: MPIUNI_ARG(info),\
902: MPI_Comm_dup(comm,newcomm))
903: #define MPI_Comm_test_inter(comm,flag) (*(flag)=1, MPI_SUCCESS)
904: #define MPI_Comm_remote_size(comm,size) (*(size)=1 ,MPI_SUCCESS)
905: #define MPI_Comm_remote_group(comm,group) MPI_SUCCESS
906: #define MPI_Intercomm_create(local_comm,local_leader,peer_comm,\
907: remote_leader,tag,newintercomm) MPI_SUCCESS
908: #define MPI_Intercomm_merge(intercomm,high,newintracomm) MPI_SUCCESS
909: #define MPI_Topo_test(comm,flag) MPI_SUCCESS
910: #define MPI_Cart_create(comm_old,ndims,dims,periods,\
911: reorder,comm_cart) MPIUni_Abort(MPI_COMM_WORLD,0)
912: #define MPI_Dims_create(nnodes,ndims,dims) MPIUni_Abort(MPI_COMM_WORLD,0)
913: #define MPI_Graph_create(comm,a,b,c,d,e) MPIUni_Abort(MPI_COMM_WORLD,0)
914: #define MPI_Graphdims_get(comm,nnodes,nedges) MPIUni_Abort(MPI_COMM_WORLD,0)
915: #define MPI_Graph_get(comm,a,b,c,d) MPIUni_Abort(MPI_COMM_WORLD,0)
916: #define MPI_Cartdim_get(comm,ndims) MPIUni_Abort(MPI_COMM_WORLD,0)
917: #define MPI_Cart_get(comm,maxdims,dims,periods,coords) \
918: MPIUni_Abort(MPI_COMM_WORLD,0)
919: #define MPI_Cart_rank(comm,coords,rank) MPIUni_Abort(MPI_COMM_WORLD,0)
920: #define MPI_Cart_coords(comm,rank,maxdims,coords) \
921: MPIUni_Abort(MPI_COMM_WORLD,0)
922: #define MPI_Graph_neighbors_count(comm,rank,nneighbors) \
923: MPIUni_Abort(MPI_COMM_WORLD,0)
924: #define MPI_Graph_neighbors(comm,rank,maxneighbors,neighbors) \
925: MPIUni_Abort(MPI_COMM_WORLD,0)
926: #define MPI_Cart_shift(comm,direction,disp,rank_source,rank_dest) \
927: MPIUni_Abort(MPI_COMM_WORLD,0)
928: #define MPI_Cart_sub(comm,remain_dims,newcomm) MPIUni_Abort(MPI_COMM_WORLD,0)
929: #define MPI_Cart_map(comm,ndims,dims,periods,newrank) MPIUni_Abort(MPI_COMM_WORLD,0)
930: #define MPI_Graph_map(comm,a,b,c,d) MPIUni_Abort(MPI_COMM_WORLD,0)
932: #define MPI_Get_processor_name(name,result_len) \
933: (*(result_len) = 9,MPIUNI_Memcpy(name,"localhost",10*sizeof(char)))
934: #define MPI_Errhandler_create(function,errhandler) \
935: (MPIUNI_ARG(function),\
936: *(errhandler) = MPI_ERRORS_RETURN,\
937: MPI_SUCCESS)
938: #define MPI_Errhandler_set(comm,errhandler) \
939: (MPIUNI_ARG(comm),\
940: MPIUNI_ARG(errhandler),\
941: MPI_SUCCESS)
942: #define MPI_Errhandler_get(comm,errhandler) \
943: (MPIUNI_ARG(comm),\
944: (*errhandler) = MPI_ERRORS_RETURN,\
945: MPI_SUCCESS)
946: #define MPI_Errhandler_free(errhandler) \
947: (*(errhandler) = MPI_ERRHANDLER_NULL,\
948: MPI_SUCCESS)
949: #define MPI_Error_string(errorcode,string,result_len) \
950: (MPIUNI_ARG(errorcode),\
951: *(result_len) = 9,\
952: MPIUNI_Memcpy(string,"MPI error",10*sizeof(char)))
953: #define MPI_Error_class(errorcode,errorclass) \
954: (*(errorclass) = errorcode, MPI_SUCCESS)
955: #define MPI_Wtick() 1.0
956: #define MPI_Pcontrol(level) MPI_SUCCESS
958: /* MPI-IO additions */
960: typedef int MPI_File;
961: #define MPI_FILE_NULL 0
963: typedef int MPI_Offset;
965: #define MPI_MODE_RDONLY 0
966: #define MPI_MODE_WRONLY 0
967: #define MPI_MODE_CREATE 0
969: #define MPI_File_open(comm,filename,amode,info,mpi_fh) \
970: (MPIUNI_ARG(comm),\
971: MPIUNI_ARG(filename),\
972: MPIUNI_ARG(amode),\
973: MPIUNI_ARG(info),\
974: MPIUNI_ARG(mpi_fh),\
975: MPIUni_Abort(MPI_COMM_WORLD,0))
977: #define MPI_File_close(mpi_fh) \
978: (MPIUNI_ARG(mpi_fh),\
979: MPIUni_Abort(MPI_COMM_WORLD,0))
981: #define MPI_File_set_view(mpi_fh,disp,etype,filetype,datarep,info) \
982: (MPIUNI_ARG(mpi_fh),\
983: MPIUNI_ARG(disp),\
984: MPIUNI_ARG(etype),\
985: MPIUNI_ARG(filetype),\
986: MPIUNI_ARG(datarep),\
987: MPIUNI_ARG(info),\
988: MPIUni_Abort(MPI_COMM_WORLD,0))
990: #define MPI_File_write_all(mpi_fh,buf,count,datatype,status) \
991: (MPIUNI_ARG(mpi_fh),\
992: MPIUNI_ARG(buf),\
993: MPIUNI_ARG(count),\
994: MPIUNI_ARG(datatype),\
995: MPIUNI_ARG(status),\
996: MPIUni_Abort(MPI_COMM_WORLD,0))
998: #define MPI_File_read_all(mpi_fh,buf,count,datatype,status) \
999: (MPIUNI_ARG(mpi_fh),\
1000: MPIUNI_ARG(buf),\
1001: MPIUNI_ARG(count),\
1002: MPIUNI_ARG(datatype),\
1003: MPIUNI_ARG(status),\
1004: MPIUni_Abort(MPI_COMM_WORLD,0))
1006: /* called from PetscInitialize() - so return success */
1007: #define MPI_Register_datarep(name,read_conv_fn,write_conv_fn,extent_fn,state) \
1008: (MPIUNI_ARG(name),\
1009: MPIUNI_ARG(read_conv_fn),\
1010: MPIUNI_ARG(write_conv_fn),\
1011: MPIUNI_ARG(extent_fn),\
1012: MPIUNI_ARG(state),\
1013: MPI_SUCCESS)
1015: #define MPI_Type_create_subarray(ndims,array_of_sizes,array_of_subsizes,array_of_starts,order,oldtype,newtype) \
1016: (MPIUNI_ARG(ndims),\
1017: MPIUNI_ARG(array_of_sizes),\
1018: MPIUNI_ARG(array_of_subsizes),\
1019: MPIUNI_ARG(array_of_starts),\
1020: MPIUNI_ARG(order),\
1021: MPIUNI_ARG(oldtype),\
1022: MPIUNI_ARG(newtype),\
1023: MPIUni_Abort(MPI_COMM_WORLD,0))
1025: #define MPI_Type_create_resized(oldtype,lb,extent,newtype) \
1026: (MPIUNI_ARG(oldtype),\
1027: MPIUNI_ARG(lb),\
1028: MPIUNI_ARG(extent),\
1029: MPIUNI_ARG(newtype),\
1030: MPIUni_Abort(MPI_COMM_WORLD,0))
1032: #if defined(__cplusplus)
1033: }
1034: #endif
1035: #endif