Actual source code: sfnvshmem.cu
1: #include <petsc/private/cudavecimpl.h>
2: #include <../src/vec/is/sf/impls/basic/sfpack.h>
3: #include <mpi.h>
4: #include <nvshmem.h>
5: #include <nvshmemx.h>
7: PetscErrorCode PetscNvshmemInitializeCheck(void)
8: {
9: if (!PetscNvshmemInitialized) { /* Note NVSHMEM does not provide a routine to check whether it is initialized */
10: nvshmemx_init_attr_t attr;
11: attr.mpi_comm = &PETSC_COMM_WORLD;
12: PetscDeviceInitialize(PETSC_DEVICE_CUDA);
13: nvshmemx_init_attr(NVSHMEMX_INIT_WITH_MPI_COMM,&attr);
14: PetscNvshmemInitialized = PETSC_TRUE;
15: PetscBeganNvshmem = PETSC_TRUE;
16: }
17: return 0;
18: }
20: PetscErrorCode PetscNvshmemMalloc(size_t size, void** ptr)
21: {
22: PetscNvshmemInitializeCheck();
23: *ptr = nvshmem_malloc(size);
25: return 0;
26: }
28: PetscErrorCode PetscNvshmemCalloc(size_t size, void**ptr)
29: {
30: PetscNvshmemInitializeCheck();
31: *ptr = nvshmem_calloc(size,1);
33: return 0;
34: }
36: PetscErrorCode PetscNvshmemFree_Private(void* ptr)
37: {
38: nvshmem_free(ptr);
39: return 0;
40: }
42: PetscErrorCode PetscNvshmemFinalize(void)
43: {
44: nvshmem_finalize();
45: return 0;
46: }
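/* Illustrative usage sketch (annotation, not part of the original source): the wrappers above lazily
   initialize NVSHMEM on PETSC_COMM_WORLD and return symmetric-heap memory. nvshmem_malloc/calloc/free
   are collective over all PEs, so these calls must be made collectively, e.g.

     PetscReal *work;
     PetscNvshmemMalloc(n*sizeof(PetscReal),(void**)&work);  // collective; work is a symmetric address
     // ... use work as a send/recv buffer for NVSHMEM puts/gets ...
     PetscNvshmemFree(work);                                 // PETSc wrapper (defined elsewhere) around PetscNvshmemFree_Private()

   PetscNvshmemFinalize() is presumably called once during PETSc finalization when PetscBeganNvshmem is set. */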
48: /* Free nvshmem related fields in the SF */
49: PetscErrorCode PetscSFReset_Basic_NVSHMEM(PetscSF sf)
50: {
51: PetscSF_Basic *bas = (PetscSF_Basic*)sf->data;
53: PetscFree2(bas->leafsigdisp,bas->leafbufdisp);
54: PetscSFFree(sf,PETSC_MEMTYPE_CUDA,bas->leafbufdisp_d);
55: PetscSFFree(sf,PETSC_MEMTYPE_CUDA,bas->leafsigdisp_d);
56: PetscSFFree(sf,PETSC_MEMTYPE_CUDA,bas->iranks_d);
57: PetscSFFree(sf,PETSC_MEMTYPE_CUDA,bas->ioffset_d);
59: PetscFree2(sf->rootsigdisp,sf->rootbufdisp);
60: PetscSFFree(sf,PETSC_MEMTYPE_CUDA,sf->rootbufdisp_d);
61: PetscSFFree(sf,PETSC_MEMTYPE_CUDA,sf->rootsigdisp_d);
62: PetscSFFree(sf,PETSC_MEMTYPE_CUDA,sf->ranks_d);
63: PetscSFFree(sf,PETSC_MEMTYPE_CUDA,sf->roffset_d);
64: return 0;
65: }
67: /* Set up NVSHMEM-related fields for an SF of type SFBASIC (to be called only after PetscSFSetUp_Basic() has set up the fields this depends on) */
68: static PetscErrorCode PetscSFSetUp_Basic_NVSHMEM(PetscSF sf)
69: {
70: cudaError_t cerr;
71: PetscSF_Basic *bas = (PetscSF_Basic*)sf->data;
72: PetscInt i,nRemoteRootRanks,nRemoteLeafRanks;
73: PetscMPIInt tag;
74: MPI_Comm comm;
75: MPI_Request *rootreqs,*leafreqs;
76: PetscInt tmp,stmp[4],rtmp[4]; /* tmps for send/recv buffers */
78: PetscObjectGetComm((PetscObject)sf,&comm);
79: PetscObjectGetNewTag((PetscObject)sf,&tag);
81: nRemoteRootRanks = sf->nranks-sf->ndranks;
82: nRemoteLeafRanks = bas->niranks-bas->ndiranks;
83: sf->nRemoteRootRanks = nRemoteRootRanks;
84: bas->nRemoteLeafRanks = nRemoteLeafRanks;
86: PetscMalloc2(nRemoteLeafRanks,&rootreqs,nRemoteRootRanks,&leafreqs);
88: stmp[0] = nRemoteRootRanks;
89: stmp[1] = sf->leafbuflen[PETSCSF_REMOTE];
90: stmp[2] = nRemoteLeafRanks;
91: stmp[3] = bas->rootbuflen[PETSCSF_REMOTE];
93: MPIU_Allreduce(stmp,rtmp,4,MPIU_INT,MPI_MAX,comm);
95: sf->nRemoteRootRanksMax = rtmp[0];
96: sf->leafbuflen_rmax = rtmp[1];
97: bas->nRemoteLeafRanksMax = rtmp[2];
98: bas->rootbuflen_rmax = rtmp[3];
100: /* In total, four rounds of MPI communication are needed to set up the NVSHMEM fields */
102: /* Root ranks to leaf ranks: send info about rootsigdisp[] and rootbufdisp[] */
103: PetscMalloc2(nRemoteRootRanks,&sf->rootsigdisp,nRemoteRootRanks,&sf->rootbufdisp);
104: for (i=0; i<nRemoteRootRanks; i++) MPI_Irecv(&sf->rootsigdisp[i],1,MPIU_INT,sf->ranks[i+sf->ndranks],tag,comm,&leafreqs[i]); /* Leaves recv */
105: for (i=0; i<nRemoteLeafRanks; i++) MPI_Send(&i,1,MPIU_INT,bas->iranks[i+bas->ndiranks],tag,comm); /* Roots send. Note i changes, so we use MPI_Send. */
106: MPI_Waitall(nRemoteRootRanks,leafreqs,MPI_STATUSES_IGNORE);
108: for (i=0; i<nRemoteRootRanks; i++) MPI_Irecv(&sf->rootbufdisp[i],1,MPIU_INT,sf->ranks[i+sf->ndranks],tag,comm,&leafreqs[i]); /* Leaves recv */
109: for (i=0; i<nRemoteLeafRanks; i++) {
110: tmp = bas->ioffset[i+bas->ndiranks] - bas->ioffset[bas->ndiranks];
111: MPI_Send(&tmp,1,MPIU_INT,bas->iranks[i+bas->ndiranks],tag,comm); /* Roots send. Note tmp changes, so we use MPI_Send. */
112: }
113: MPI_Waitall(nRemoteRootRanks,leafreqs,MPI_STATUSES_IGNORE);
115: cudaMalloc((void**)&sf->rootbufdisp_d,nRemoteRootRanks*sizeof(PetscInt));
116: cudaMalloc((void**)&sf->rootsigdisp_d,nRemoteRootRanks*sizeof(PetscInt));
117: cudaMalloc((void**)&sf->ranks_d,nRemoteRootRanks*sizeof(PetscMPIInt));
118: cudaMalloc((void**)&sf->roffset_d,(nRemoteRootRanks+1)*sizeof(PetscInt));
120: cudaMemcpyAsync(sf->rootbufdisp_d,sf->rootbufdisp,nRemoteRootRanks*sizeof(PetscInt),cudaMemcpyHostToDevice,PetscDefaultCudaStream);
121: cudaMemcpyAsync(sf->rootsigdisp_d,sf->rootsigdisp,nRemoteRootRanks*sizeof(PetscInt),cudaMemcpyHostToDevice,PetscDefaultCudaStream);
122: cudaMemcpyAsync(sf->ranks_d,sf->ranks+sf->ndranks,nRemoteRootRanks*sizeof(PetscMPIInt),cudaMemcpyHostToDevice,PetscDefaultCudaStream);
123: cudaMemcpyAsync(sf->roffset_d,sf->roffset+sf->ndranks,(nRemoteRootRanks+1)*sizeof(PetscInt),cudaMemcpyHostToDevice,PetscDefaultCudaStream);
125: /* Leaf ranks to root ranks: send info about leafsigdisp[] and leafbufdisp[] */
126: PetscMalloc2(nRemoteLeafRanks,&bas->leafsigdisp,nRemoteLeafRanks,&bas->leafbufdisp);
127: for (i=0; i<nRemoteLeafRanks; i++) MPI_Irecv(&bas->leafsigdisp[i],1,MPIU_INT,bas->iranks[i+bas->ndiranks],tag,comm,&rootreqs[i]);
128: for (i=0; i<nRemoteRootRanks; i++) MPI_Send(&i,1,MPIU_INT,sf->ranks[i+sf->ndranks],tag,comm);
129: MPI_Waitall(nRemoteLeafRanks,rootreqs,MPI_STATUSES_IGNORE);
131: for (i=0; i<nRemoteLeafRanks; i++) MPI_Irecv(&bas->leafbufdisp[i],1,MPIU_INT,bas->iranks[i+bas->ndiranks],tag,comm,&rootreqs[i]);
132: for (i=0; i<nRemoteRootRanks; i++) {
133: tmp = sf->roffset[i+sf->ndranks] - sf->roffset[sf->ndranks];
134: MPI_Send(&tmp,1,MPIU_INT,sf->ranks[i+sf->ndranks],tag,comm);
135: }
136: MPI_Waitall(nRemoteLeafRanks,rootreqs,MPI_STATUSES_IGNORE);
138: cudaMalloc((void**)&bas->leafbufdisp_d,nRemoteLeafRanks*sizeof(PetscInt));
139: cudaMalloc((void**)&bas->leafsigdisp_d,nRemoteLeafRanks*sizeof(PetscInt));
140: cudaMalloc((void**)&bas->iranks_d,nRemoteLeafRanks*sizeof(PetscMPIInt));
141: cudaMalloc((void**)&bas->ioffset_d,(nRemoteLeafRanks+1)*sizeof(PetscInt));
143: cudaMemcpyAsync(bas->leafbufdisp_d,bas->leafbufdisp,nRemoteLeafRanks*sizeof(PetscInt),cudaMemcpyHostToDevice,PetscDefaultCudaStream);
144: cudaMemcpyAsync(bas->leafsigdisp_d,bas->leafsigdisp,nRemoteLeafRanks*sizeof(PetscInt),cudaMemcpyHostToDevice,PetscDefaultCudaStream);
145: cudaMemcpyAsync(bas->iranks_d,bas->iranks+bas->ndiranks,nRemoteLeafRanks*sizeof(PetscMPIInt),cudaMemcpyHostToDevice,PetscDefaultCudaStream);
146: cudaMemcpyAsync(bas->ioffset_d,bas->ioffset+bas->ndiranks,(nRemoteLeafRanks+1)*sizeof(PetscInt),cudaMemcpyHostToDevice,PetscDefaultCudaStream);
148: PetscFree2(rootreqs,leafreqs);
149: return 0;
150: }
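/* Annotation (not part of the original source): a small worked example of the fields set up above, from
   the point of view of one leaf rank L with remote root ranks r_0, r_1, ...
     sf->rootsigdisp[i] -- L's slot index inside r_i's signal array, i.e. L's position among r_i's remote
                           leaf ranks (r_i sent its loop index i for that position);
     sf->rootbufdisp[i] -- offset (in units) of L's chunk inside r_i's remote root buffer, computed by r_i
                           as ioffset[i+ndiranks]-ioffset[ndiranks].
   Symmetrically, bas->leafsigdisp[]/bas->leafbufdisp[] tell a root rank where its signal slot and data
   chunk live on each of its remote leaf ranks. The *_d arrays are plain device copies of the same data so
   that the NVSHMEM kernels below can index them. */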
152: PetscErrorCode PetscSFLinkNvshmemCheck(PetscSF sf,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,const void *leafdata,PetscBool *use_nvshmem)
153: {
154: MPI_Comm comm;
155: PetscBool isBasic;
156: PetscMPIInt result = MPI_UNEQUAL;
158: PetscObjectGetComm((PetscObject)sf,&comm);
159: /* Check if the sf is eligible for NVSHMEM, if we have not checked yet.
160: Note the check result <use_nvshmem> must be the same over comm, since an SFLink must be collectively either NVSHMEM or MPI.
161: */
163: if (sf->use_nvshmem && !sf->checked_nvshmem_eligibility) {
164: /* Only use NVSHMEM for SFBASIC on PETSC_COMM_WORLD */
165: PetscObjectTypeCompare((PetscObject)sf,PETSCSFBASIC,&isBasic);
166: if (isBasic) MPI_Comm_compare(PETSC_COMM_WORLD,comm,&result);
167: if (!isBasic || (result != MPI_IDENT && result != MPI_CONGRUENT)) sf->use_nvshmem = PETSC_FALSE; /* If not eligible, clear the flag so that we don't try again */
169: /* Do a further check: if on some rank both rootdata and leafdata are NULL, that rank might treat them as PETSC_MEMTYPE_CUDA (or HOST)
170: and choose NVSHMEM, while other ranks whose root/leafmtypes are PETSC_MEMTYPE_HOST (or DEVICE) would not, leading to an
171: inconsistent return value <use_nvshmem> across ranks. To be safe, we simply disable NVSHMEM for these rare SFs.
172: */
173: if (sf->use_nvshmem) {
174: PetscInt hasNullRank = (!rootdata && !leafdata) ? 1 : 0;
175: MPI_Allreduce(MPI_IN_PLACE,&hasNullRank,1,MPIU_INT,MPI_LOR,comm);
176: if (hasNullRank) sf->use_nvshmem = PETSC_FALSE;
177: }
178: sf->checked_nvshmem_eligibility = PETSC_TRUE; /* If eligible, don't do above check again */
179: }
181: /* Check if rootmtype and leafmtype collectively are PETSC_MEMTYPE_CUDA */
182: if (sf->use_nvshmem) {
183: PetscInt oneCuda = (!rootdata || PetscMemTypeCUDA(rootmtype)) && (!leafdata || PetscMemTypeCUDA(leafmtype)) ? 1 : 0; /* Do I use cuda for both root&leafmtype? */
184: PetscInt allCuda = oneCuda; /* Assume the same for all ranks. But if not, in opt mode, return value <use_nvshmem> won't be collective! */
185: #if defined(PETSC_USE_DEBUG) /* Check in debug mode. Note MPI_Allreduce is expensive, so only in debug mode */
186: MPI_Allreduce(&oneCuda,&allCuda,1,MPIU_INT,MPI_LAND,comm);
188: #endif
189: if (allCuda) {
190: PetscNvshmemInitializeCheck();
191: if (!sf->setup_nvshmem) { /* Set up nvshmem related fields on this SF on-demand */
192: PetscSFSetUp_Basic_NVSHMEM(sf);
193: sf->setup_nvshmem = PETSC_TRUE;
194: }
195: *use_nvshmem = PETSC_TRUE;
196: } else {
197: *use_nvshmem = PETSC_FALSE;
198: }
199: } else {
200: *use_nvshmem = PETSC_FALSE;
201: }
202: return 0;
203: }
205: /* Build dependence between <stream> and <remoteCommStream> at the entry of NVSHMEM communication */
206: static PetscErrorCode PetscSFLinkBuildDependenceBegin(PetscSF sf,PetscSFLink link,PetscSFDirection direction)
207: {
208: cudaError_t cerr;
209: PetscSF_Basic *bas = (PetscSF_Basic *)sf->data;
210: PetscInt buflen = (direction == PETSCSF_ROOT2LEAF)? bas->rootbuflen[PETSCSF_REMOTE] : sf->leafbuflen[PETSCSF_REMOTE];
212: if (buflen) {
213: cudaEventRecord(link->dataReady,link->stream);
214: cudaStreamWaitEvent(link->remoteCommStream,link->dataReady,0);
215: }
216: return 0;
217: }
219: /* Build dependence between <stream> and <remoteCommStream> at the exit of NVSHMEM communication */
220: static PetscErrorCode PetscSFLinkBuildDependenceEnd(PetscSF sf,PetscSFLink link,PetscSFDirection direction)
221: {
222: cudaError_t cerr;
223: PetscSF_Basic *bas = (PetscSF_Basic *)sf->data;
224: PetscInt buflen = (direction == PETSCSF_ROOT2LEAF)? sf->leafbuflen[PETSCSF_REMOTE] : bas->rootbuflen[PETSCSF_REMOTE];
226: /* If we unpack to a non-null device buffer, build the endRemoteComm dependence */
227: if (buflen) {
228: cudaEventRecord(link->endRemoteComm,link->remoteCommStream);
229: cudaStreamWaitEvent(link->stream,link->endRemoteComm,0);
230: }
231: return 0;
232: }
234: /* Send/Put signals to remote ranks
236: Input parameters:
237: + n - Number of remote ranks
238: . sig - Signal address in symmetric heap
239: . sigdisp - To i-th rank, use its signal at offset sigdisp[i]
240: . ranks - remote ranks
241: - newval - Set signals to this value
242: */
243: __global__ static void NvshmemSendSignals(PetscInt n,uint64_t *sig,PetscInt *sigdisp,PetscMPIInt *ranks,uint64_t newval)
244: {
245: int i = blockIdx.x*blockDim.x + threadIdx.x;
247: /* Each thread puts one remote signal */
248: if (i < n) nvshmemx_uint64_signal(sig+sigdisp[i],newval,ranks[i]);
249: }
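/* Annotation (not in the original source): this kernel is launched later in this file with one thread per
   destination rank, e.g.

     NvshmemSendSignals<<<(n+255)/256,256,0,link->remoteCommStream>>>(n,sig,sigdisp,ranks,1);

   so thread i writes <newval> into the signal word located at sig+sigdisp[i] on PE ranks[i]; <sig> must be
   a symmetric address for the remote write to be meaningful. */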
251: /* Wait until the local signals equal the expected value, then set them to a new value
253: Input parameters:
254: + n - Number of signals
255: . sig - Local signal address
256: . expval - expected value
257: - newval - Set signals to this new value
258: */
259: __global__ static void NvshmemWaitSignals(PetscInt n,uint64_t *sig,uint64_t expval,uint64_t newval)
260: {
261: #if 0
262: /* Akhil Langer@NVIDIA said using 1 thread and nvshmem_uint64_wait_until_all is better */
263: int i = blockIdx.x*blockDim.x + threadIdx.x;
264: if (i < n) {
265: nvshmem_signal_wait_until(sig+i,NVSHMEM_CMP_EQ,expval);
266: sig[i] = newval;
267: }
268: #else
269: nvshmem_uint64_wait_until_all(sig,n,NULL/*no mask*/,NVSHMEM_CMP_EQ,expval);
270: for (int i=0; i<n; i++) sig[i] = newval;
271: #endif
272: }
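/* Annotation (not in the original source): NvshmemWaitSignals() is launched below with a single thread,
   e.g. NvshmemWaitSignals<<<1,1,0,link->remoteCommStream>>>(n,sig,expval,newval); the one thread blocks in
   nvshmem_uint64_wait_until_all() until every sig[i] equals expval, then resets all of them to newval. */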
274: /* ===========================================================================================================
276: A set of routines to support receiver initiated communication using the get method
278: The getting protocol is:
280: Sender has a send buf (sbuf) and a signal variable (ssig); Receiver has a recv buf (rbuf) and a signal variable (rsig);
281: All signal variables have an initial value 0.
283:    Sender:                                      | Receiver:
284:      1. Wait until ssig is 0, then set it to 1  |
285:      2. Pack data into the stand-alone sbuf     |
286:      3. Put 1 to receiver's rsig                |   1. Wait until rsig is 1, then set it to 0
287:                                                 |   2. Get data from remote sbuf to local rbuf
288:                                                 |   3. Put 0 to sender's ssig
289:                                                 |   4. Unpack data from local rbuf
290: ===========================================================================================================*/
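/* Annotation (not in the original source): in this file the getting protocol above maps onto the SF link
   callbacks roughly as follows (see PetscSFLinkCreate_NVSHMEM at the end of the file):
     Sender step 1                   -> PetscSFLinkWaitSignalsOfCompletionOfGettingData_NVSHMEM (link->PrePack)
     Sender step 3, Receiver steps 1-2 -> PetscSFLinkGetDataBegin_NVSHMEM                       (link->StartCommunication)
     Receiver step 3                 -> PetscSFLinkGetDataEnd_NVSHMEM                           (link->FinishCommunication)
   Packing (Sender step 2) and unpacking (Receiver step 4) are done by the generic SF pack/unpack kernels
   outside this file. */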
291: /* PrePack operation -- since the sender will overwrite the send buffer, which receivers might still be getting data from,
292: the sender waits for signals from the receivers indicating that they have finished getting the data
293: */
294: PetscErrorCode PetscSFLinkWaitSignalsOfCompletionOfGettingData_NVSHMEM(PetscSF sf,PetscSFLink link,PetscSFDirection direction)
295: {
296: PetscSF_Basic *bas = (PetscSF_Basic*)sf->data;
297: uint64_t *sig;
298: PetscInt n;
300: if (direction == PETSCSF_ROOT2LEAF) { /* leaf ranks are getting data */
301: sig = link->rootSendSig; /* leaf ranks set my rootSendSig */
302: n = bas->nRemoteLeafRanks;
303: } else { /* LEAF2ROOT */
304: sig = link->leafSendSig;
305: n = sf->nRemoteRootRanks;
306: }
308: if (n) {
309: NvshmemWaitSignals<<<1,1,0,link->remoteCommStream>>>(n,sig,0,1); /* wait the signals to be 0, then set them to 1 */
310: cudaGetLastError();
311: }
312: return 0;
313: }
315: /* n thread blocks; each block handles one remote rank */
316: __global__ static void GetDataFromRemotelyAccessible(PetscInt nsrcranks,PetscMPIInt *srcranks,const char *src,PetscInt *srcdisp,char *dst,PetscInt *dstdisp,PetscInt unitbytes)
317: {
318: int bid = blockIdx.x;
319: PetscMPIInt pe = srcranks[bid];
321: if (!nvshmem_ptr(src,pe)) {
322: PetscInt nelems = (dstdisp[bid+1]-dstdisp[bid])*unitbytes;
323: nvshmem_getmem_nbi(dst+(dstdisp[bid]-dstdisp[0])*unitbytes,src+srcdisp[bid]*unitbytes,nelems,pe);
324: }
325: }
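/* Annotation (not in the original source): nvshmem_ptr(sym_addr,pe) returns a directly dereferenceable
   local pointer when PE <pe>'s copy of the symmetric object is load/store accessible from this GPU (e.g.
   peers on the same node reachable over NVLink/PCIe), and NULL otherwise. The kernel above therefore only
   issues nvshmem_getmem_nbi() for PEs that are NOT locally accessible; the locally accessible PEs are
   handled on the host with nvshmemx_getmem_nbi_on_stream() in PetscSFLinkGetDataBegin_NVSHMEM() below. */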
327: /* Start communication -- Get data in the given direction */
328: PetscErrorCode PetscSFLinkGetDataBegin_NVSHMEM(PetscSF sf,PetscSFLink link,PetscSFDirection direction)
329: {
330: cudaError_t cerr;
331: PetscSF_Basic *bas = (PetscSF_Basic*)sf->data;
333: PetscInt nsrcranks,ndstranks,nLocallyAccessible = 0;
335: char *src,*dst;
336: PetscInt *srcdisp_h,*dstdisp_h;
337: PetscInt *srcdisp_d,*dstdisp_d;
338: PetscMPIInt *srcranks_h;
339: PetscMPIInt *srcranks_d,*dstranks_d;
340: uint64_t *dstsig;
341: PetscInt *dstsigdisp_d;
343: PetscSFLinkBuildDependenceBegin(sf,link,direction);
344: if (direction == PETSCSF_ROOT2LEAF) { /* src is root, dst is leaf; we will move data from src to dst */
345: nsrcranks = sf->nRemoteRootRanks;
346: src = link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* root buf is the send buf; it is in symmetric heap */
348: srcdisp_h = sf->rootbufdisp; /* for my i-th remote root rank, I will access its buf at offset rootbufdisp[i] */
349: srcdisp_d = sf->rootbufdisp_d;
350: srcranks_h = sf->ranks+sf->ndranks; /* my (remote) root ranks */
351: srcranks_d = sf->ranks_d;
353: ndstranks = bas->nRemoteLeafRanks;
354: dst = link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* recv buf is the local leaf buf, also in symmetric heap */
356: dstdisp_h = sf->roffset+sf->ndranks; /* offsets of the local leaf buf. Note dstdisp[0] is not necessarily 0 */
357: dstdisp_d = sf->roffset_d;
358: dstranks_d = bas->iranks_d; /* my (remote) leaf ranks */
360: dstsig = link->leafRecvSig;
361: dstsigdisp_d = bas->leafsigdisp_d;
362: } else { /* src is leaf, dst is root; we will move data from src to dst */
363: nsrcranks = bas->nRemoteLeafRanks;
364: src = link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* leaf buf is the send buf */
366: srcdisp_h = bas->leafbufdisp; /* for my i-th remote leaf rank, I will access its leaf buf at offset leafbufdisp[i] */
367: srcdisp_d = bas->leafbufdisp_d;
368: srcranks_h = bas->iranks+bas->ndiranks; /* my (remote) leaf ranks */
369: srcranks_d = bas->iranks_d;
371: ndstranks = sf->nRemoteRootRanks;
372: dst = link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* the local root buf is the recv buf */
374: dstdisp_h = bas->ioffset+bas->ndiranks; /* offsets of the local root buf. Note dstdisp[0] is not necessarily 0 */
375: dstdisp_d = bas->ioffset_d;
376: dstranks_d = sf->ranks_d; /* my (remote) root ranks */
378: dstsig = link->rootRecvSig;
379: dstsigdisp_d = sf->rootsigdisp_d;
380: }
382: /* After Pack operation -- src tells dst ranks that they are allowed to get data */
383: if (ndstranks) {
384: NvshmemSendSignals<<<(ndstranks+255)/256,256,0,link->remoteCommStream>>>(ndstranks,dstsig,dstsigdisp_d,dstranks_d,1); /* set signals to 1 */
385: cudaGetLastError();
386: }
388: /* dst waits for signals (permissions) from src ranks to start getting data */
389: if (nsrcranks) {
390: NvshmemWaitSignals<<<1,1,0,link->remoteCommStream>>>(nsrcranks,dstsig,1,0); /* wait the signals to be 1, then set them to 0 */
391: cudaGetLastError();
392: }
394: /* dst gets data from src ranks using non-blocking nvshmem_gets, which are finished in PetscSFLinkGetDataEnd_NVSHMEM() */
396: /* Count number of locally accessible src ranks, which should be a small number */
397: for (int i=0; i<nsrcranks; i++) {if (nvshmem_ptr(src,srcranks_h[i])) nLocallyAccessible++;}
399: /* Get data from remotely accessible PEs */
400: if (nLocallyAccessible < nsrcranks) {
401: GetDataFromRemotelyAccessible<<<nsrcranks,1,0,link->remoteCommStream>>>(nsrcranks,srcranks_d,src,srcdisp_d,dst,dstdisp_d,link->unitbytes);
402: cudaGetLastError();
403: }
405: /* Get data from locally accessible PEs */
406: if (nLocallyAccessible) {
407: for (int i=0; i<nsrcranks; i++) {
408: int pe = srcranks_h[i];
409: if (nvshmem_ptr(src,pe)) {
410: size_t nelems = (dstdisp_h[i+1]-dstdisp_h[i])*link->unitbytes;
411: nvshmemx_getmem_nbi_on_stream(dst+(dstdisp_h[i]-dstdisp_h[0])*link->unitbytes,src+srcdisp_h[i]*link->unitbytes,nelems,pe,link->remoteCommStream);
412: }
413: }
414: }
415: return 0;
416: }
418: /* Finish the communication (can be done before Unpack)
419: Receiver tells its senders that they are allowed to reuse their send buffer (since receiver has got data from their send buffer)
420: */
421: PetscErrorCode PetscSFLinkGetDataEnd_NVSHMEM(PetscSF sf,PetscSFLink link,PetscSFDirection direction)
422: {
423: cudaError_t cerr;
424: PetscSF_Basic *bas = (PetscSF_Basic*)sf->data;
425: uint64_t *srcsig;
426: PetscInt nsrcranks,*srcsigdisp;
427: PetscMPIInt *srcranks;
429: if (direction == PETSCSF_ROOT2LEAF) { /* leaf ranks are getting data */
430: nsrcranks = sf->nRemoteRootRanks;
431: srcsig = link->rootSendSig; /* I want to set their root signal */
432: srcsigdisp = sf->rootsigdisp_d; /* offset of each root signal */
433: srcranks = sf->ranks_d; /* ranks of the n root ranks */
434: } else { /* LEAF2ROOT, root ranks are getting data */
435: nsrcranks = bas->nRemoteLeafRanks;
436: srcsig = link->leafSendSig;
437: srcsigdisp = bas->leafsigdisp_d;
438: srcranks = bas->iranks_d;
439: }
441: if (nsrcranks) {
442: nvshmemx_quiet_on_stream(link->remoteCommStream); /* Finish the nonblocking get, so that we can unpack afterwards */
443: cudaGetLastError();
444: NvshmemSendSignals<<<(nsrcranks+511)/512,512,0,link->remoteCommStream>>>(nsrcranks,srcsig,srcsigdisp,srcranks,0); /* set signals to 0 */
445: cudaGetLastError();
446: }
447: PetscSFLinkBuildDependenceEnd(sf,link,direction);
448: return 0;
449: }
451: /* ===========================================================================================================
453: A set of routines to support sender initiated communication using the put-based method (the default)
455: The putting protocol is:
457: Sender has a send buf (sbuf) and a send signal var (ssig); Receiver has a stand-alone recv buf (rbuf)
458: and a recv signal var (rsig); All signal variables have an initial value 0. rbuf is allocated by SF and
459: is in nvshmem space.
461:    Sender:                                      | Receiver:
462:                                                 |
463:      1. Pack data into sbuf                     |
464:      2. Wait until ssig is 0, then set it to 1  |
465:      3. Put data to remote stand-alone rbuf     |
466:      4. Fence // make sure 5 happens after 3    |
467:      5. Put 1 to receiver's rsig                |   1. Wait until rsig is 1, then set it to 0
468:                                                 |   2. Unpack data from local rbuf
469:                                                 |   3. Put 0 to sender's ssig
470: ===========================================================================================================*/
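/* Annotation (not in the original source): the putting protocol above maps onto the SF link callbacks
   roughly as follows (see PetscSFLinkCreate_NVSHMEM at the end of the file):
     Sender steps 2-4              -> PetscSFLinkPutDataBegin_NVSHMEM                  (link->StartCommunication)
     Sender step 5, Receiver step 1 -> PetscSFLinkPutDataEnd_NVSHMEM                   (link->FinishCommunication)
     Receiver step 3               -> PetscSFLinkSendSignalsToAllowPuttingData_NVSHMEM (link->PostUnpack)
   Packing (Sender step 1) and unpacking (Receiver step 2) are done by the generic SF pack/unpack kernels
   outside this file. */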
472: /* n thread blocks; each block handles one remote rank */
473: __global__ static void WaitAndPutDataToRemotelyAccessible(PetscInt ndstranks,PetscMPIInt *dstranks,char *dst,PetscInt *dstdisp,const char *src,PetscInt *srcdisp,uint64_t *srcsig,PetscInt unitbytes)
474: {
475: int bid = blockIdx.x;
476: PetscMPIInt pe = dstranks[bid];
478: if (!nvshmem_ptr(dst,pe)) {
479: PetscInt nelems = (srcdisp[bid+1]-srcdisp[bid])*unitbytes;
480: nvshmem_uint64_wait_until(srcsig+bid,NVSHMEM_CMP_EQ,0); /* Wait until the sig = 0 */
481: srcsig[bid] = 1;
482: nvshmem_putmem_nbi(dst+dstdisp[bid]*unitbytes,src+(srcdisp[bid]-srcdisp[0])*unitbytes,nelems,pe);
483: }
484: }
486: /* A one-thread kernel that handles all locally accessible PEs */
487: __global__ static void WaitSignalsFromLocallyAccessible(PetscInt ndstranks,PetscMPIInt *dstranks,uint64_t *srcsig,const char *dst)
488: {
489: for (int i=0; i<ndstranks; i++) {
490: int pe = dstranks[i];
491: if (nvshmem_ptr(dst,pe)) {
492: nvshmem_uint64_wait_until(srcsig+i,NVSHMEM_CMP_EQ,0); /* Wait until the sig = 0 */
493: srcsig[i] = 1;
494: }
495: }
496: }
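/* Annotation (not in the original source): this kernel only waits on (and flips) the ssig slots of the
   locally accessible destination PEs, because for those PEs the actual data movement is issued from the
   host with nvshmemx_putmem_nbi_on_stream() in PetscSFLinkPutDataBegin_NVSHMEM() below; the remotely
   accessible PEs get both the wait and the put inside WaitAndPutDataToRemotelyAccessible() above. */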
498: /* Put data in the given direction */
499: PetscErrorCode PetscSFLinkPutDataBegin_NVSHMEM(PetscSF sf,PetscSFLink link,PetscSFDirection direction)
500: {
501: cudaError_t cerr;
502: PetscSF_Basic *bas = (PetscSF_Basic*)sf->data;
503: PetscInt ndstranks,nLocallyAccessible = 0;
504: char *src,*dst;
505: PetscInt *srcdisp_h,*dstdisp_h;
506: PetscInt *srcdisp_d,*dstdisp_d;
507: PetscMPIInt *dstranks_h;
508: PetscMPIInt *dstranks_d;
509: uint64_t *srcsig;
511: PetscSFLinkBuildDependenceBegin(sf,link,direction);
512: if (direction == PETSCSF_ROOT2LEAF) { /* put data in rootbuf to leafbuf */
513: ndstranks = bas->nRemoteLeafRanks; /* number of (remote) leaf ranks */
514: src = link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]; /* Both src & dst must be symmetric */
515: dst = link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE];
517: srcdisp_h = bas->ioffset+bas->ndiranks; /* offsets of rootbuf. srcdisp[0] is not necessarily zero */
518: srcdisp_d = bas->ioffset_d;
519: srcsig = link->rootSendSig;
521: dstdisp_h = bas->leafbufdisp; /* for my i-th remote leaf rank, I will access its leaf buf at offset leafbufdisp[i] */
522: dstdisp_d = bas->leafbufdisp_d;
523: dstranks_h = bas->iranks+bas->ndiranks; /* remote leaf ranks */
524: dstranks_d = bas->iranks_d;
525: } else { /* put data in leafbuf to rootbuf */
526: ndstranks = sf->nRemoteRootRanks;
527: src = link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE];
528: dst = link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE];
530: srcdisp_h = sf->roffset+sf->ndranks; /* offsets of leafbuf */
531: srcdisp_d = sf->roffset_d;
532: srcsig = link->leafSendSig;
534: dstdisp_h = sf->rootbufdisp; /* for my i-th remote root rank, I will access its root buf at offset rootbufdisp[i] */
535: dstdisp_d = sf->rootbufdisp_d;
536: dstranks_h = sf->ranks+sf->ndranks; /* remote root ranks */
537: dstranks_d = sf->ranks_d;
538: }
540: /* Wait for signals and then put data to dst ranks using non-blocking nvshmem puts, which are completed in PetscSFLinkPutDataEnd_NVSHMEM() */
542: /* Count number of locally accessible neighbors, which should be a small number */
543: for (int i=0; i<ndstranks; i++) {if (nvshmem_ptr(dst,dstranks_h[i])) nLocallyAccessible++;}
545: /* For remotely accessible PEs, send data to them in one kernel call */
546: if (nLocallyAccessible < ndstranks) {
547: WaitAndPutDataToRemotelyAccessible<<<ndstranks,1,0,link->remoteCommStream>>>(ndstranks,dstranks_d,dst,dstdisp_d,src,srcdisp_d,srcsig,link->unitbytes);
548: cudaGetLastError();
549: }
551: /* For locally accessible PEs, use host API, which uses CUDA copy-engines and is much faster than device API */
552: if (nLocallyAccessible) {
553: WaitSignalsFromLocallyAccessible<<<1,1,0,link->remoteCommStream>>>(ndstranks,dstranks_d,srcsig,dst);
554: for (int i=0; i<ndstranks; i++) {
555: int pe = dstranks_h[i];
556: if (nvshmem_ptr(dst,pe)) { /* If return a non-null pointer, then <pe> is locally accessible */
557: size_t nelems = (srcdisp_h[i+1]-srcdisp_h[i])*link->unitbytes;
558: /* Initiate the nonblocking communication */
559: nvshmemx_putmem_nbi_on_stream(dst+dstdisp_h[i]*link->unitbytes,src+(srcdisp_h[i]-srcdisp_h[0])*link->unitbytes,nelems,pe,link->remoteCommStream);
560: }
561: }
562: }
564: if (nLocallyAccessible) {
565: nvshmemx_quiet_on_stream(link->remoteCommStream); /* Calling nvshmem_fence/quiet() does not fence the above nvshmemx_putmem_nbi_on_stream! */
566: }
567: return 0;
568: }
570: /* A one-thread kernel. The single thread handles all destination and source ranks */
571: __global__ static void PutDataEnd(PetscInt nsrcranks,PetscInt ndstranks,PetscMPIInt *dstranks,uint64_t *dstsig,PetscInt *dstsigdisp)
572: {
573: /* TODO: Shall we finish the non-blocking remote puts here? */
575: /* 1. Send a signal to each dst rank */
577: /* According to Akhil@NVIDIA, IB is ordered, so no fence is needed for remote PEs.
578: For local PEs, we already called nvshmemx_quiet_on_stream(). Therefore, we are good to send signals to all dst ranks now.
579: */
580: for (int i=0; i<ndstranks; i++) {nvshmemx_uint64_signal(dstsig+dstsigdisp[i],1,dstranks[i]);} /* set sig to 1 */
582: /* 2. Wait for signals from src ranks (if any) */
583: if (nsrcranks) {
584: nvshmem_uint64_wait_until_all(dstsig,nsrcranks,NULL/*no mask*/,NVSHMEM_CMP_EQ,1); /* wait sigs to be 1, then set them to 0 */
585: for (int i=0; i<nsrcranks; i++) dstsig[i] = 0;
586: }
587: }
589: /* Finish the communication -- A receiver waits until it can access its receive buffer */
590: PetscErrorCode PetscSFLinkPutDataEnd_NVSHMEM(PetscSF sf,PetscSFLink link,PetscSFDirection direction)
591: {
592: cudaError_t cerr;
593: PetscSF_Basic *bas = (PetscSF_Basic*)sf->data;
594: PetscMPIInt *dstranks;
595: uint64_t *dstsig;
596: PetscInt nsrcranks,ndstranks,*dstsigdisp;
598: if (direction == PETSCSF_ROOT2LEAF) { /* put root data to leaf */
599: nsrcranks = sf->nRemoteRootRanks;
601: ndstranks = bas->nRemoteLeafRanks;
602: dstranks = bas->iranks_d; /* leaf ranks */
603: dstsig = link->leafRecvSig; /* I will set my leaf ranks's RecvSig */
604: dstsigdisp = bas->leafsigdisp_d; /* for my i-th remote leaf rank, I will access its signal at offset leafsigdisp[i] */
605: } else { /* LEAF2ROOT */
606: nsrcranks = bas->nRemoteLeafRanks;
608: ndstranks = sf->nRemoteRootRanks;
609: dstranks = sf->ranks_d;
610: dstsig = link->rootRecvSig;
611: dstsigdisp = sf->rootsigdisp_d;
612: }
614: if (nsrcranks || ndstranks) {
615: PutDataEnd<<<1,1,0,link->remoteCommStream>>>(nsrcranks,ndstranks,dstranks,dstsig,dstsigdisp);
616: cudaGetLastError();
617: }
618: PetscSFLinkBuildDependenceEnd(sf,link,direction);
619: return 0;
620: }
622: /* PostUnpack operation -- a receiver tells its senders that they are allowed to put data here (i.e., the recv buffer is free to take new data) */
623: PetscErrorCode PetscSFLinkSendSignalsToAllowPuttingData_NVSHMEM(PetscSF sf,PetscSFLink link,PetscSFDirection direction)
624: {
625: PetscSF_Basic *bas = (PetscSF_Basic*)sf->data;
626: uint64_t *srcsig;
627: PetscInt nsrcranks,*srcsigdisp_d;
628: PetscMPIInt *srcranks_d;
630: if (direction == PETSCSF_ROOT2LEAF) { /* I allow my root ranks to put data to me */
631: nsrcranks = sf->nRemoteRootRanks;
632: srcsig = link->rootSendSig; /* I want to set their send signals */
633: srcsigdisp_d = sf->rootsigdisp_d; /* offset of each root signal */
634: srcranks_d = sf->ranks_d; /* ranks of the n root ranks */
635: } else { /* LEAF2ROOT */
636: nsrcranks = bas->nRemoteLeafRanks;
637: srcsig = link->leafSendSig;
638: srcsigdisp_d = bas->leafsigdisp_d;
639: srcranks_d = bas->iranks_d;
640: }
642: if (nsrcranks) {
643: NvshmemSendSignals<<<(nsrcranks+255)/256,256,0,link->remoteCommStream>>>(nsrcranks,srcsig,srcsigdisp_d,srcranks_d,0); /* Set remote signals to 0 */
644: cudaGetLastError();
645: }
646: return 0;
647: }
649: /* Destructor when the link uses nvshmem for communication */
650: static PetscErrorCode PetscSFLinkDestroy_NVSHMEM(PetscSF sf,PetscSFLink link)
651: {
652: cudaError_t cerr;
654: cudaEventDestroy(link->dataReady);
655: cudaEventDestroy(link->endRemoteComm);
656: cudaStreamDestroy(link->remoteCommStream);
658: /* NVSHMEM does not need buffers on the host; they should be NULL */
659: PetscNvshmemFree(link->leafbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]);
660: PetscNvshmemFree(link->leafSendSig);
661: PetscNvshmemFree(link->leafRecvSig);
662: PetscNvshmemFree(link->rootbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]);
663: PetscNvshmemFree(link->rootSendSig);
664: PetscNvshmemFree(link->rootRecvSig);
665: return 0;
666: }
668: PetscErrorCode PetscSFLinkCreate_NVSHMEM(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,const void *leafdata,MPI_Op op,PetscSFOperation sfop,PetscSFLink *mylink)
669: {
670: cudaError_t cerr;
671: PetscSF_Basic *bas = (PetscSF_Basic*)sf->data;
672: PetscSFLink *p,link;
673: PetscBool match,rootdirect[2],leafdirect[2];
674: int greatestPriority;
676: /* Check to see if we can directly send/recv root/leafdata with the given sf, sfop and op.
677: We only care about root/leafdirect[PETSCSF_REMOTE], since we never need intermediate buffers for the local communication with NVSHMEM.
678: */
679: if (sfop == PETSCSF_BCAST) { /* Move data from rootbuf to leafbuf */
680: if (sf->use_nvshmem_get) {
681: rootdirect[PETSCSF_REMOTE] = PETSC_FALSE; /* send buffer has to be stand-alone (can't be rootdata) */
682: leafdirect[PETSCSF_REMOTE] = (PetscMemTypeNVSHMEM(leafmtype) && sf->leafcontig[PETSCSF_REMOTE] && op == MPI_REPLACE) ? PETSC_TRUE : PETSC_FALSE;
683: } else {
684: rootdirect[PETSCSF_REMOTE] = (PetscMemTypeNVSHMEM(rootmtype) && bas->rootcontig[PETSCSF_REMOTE]) ? PETSC_TRUE : PETSC_FALSE;
685: leafdirect[PETSCSF_REMOTE] = PETSC_FALSE; /* Our put-protocol always needs a nvshmem alloc'ed recv buffer */
686: }
687: } else if (sfop == PETSCSF_REDUCE) { /* Move data from leafbuf to rootbuf */
688: if (sf->use_nvshmem_get) {
689: rootdirect[PETSCSF_REMOTE] = (PetscMemTypeNVSHMEM(rootmtype) && bas->rootcontig[PETSCSF_REMOTE] && op == MPI_REPLACE) ? PETSC_TRUE : PETSC_FALSE;
690: leafdirect[PETSCSF_REMOTE] = PETSC_FALSE;
691: } else {
692: rootdirect[PETSCSF_REMOTE] = PETSC_FALSE;
693: leafdirect[PETSCSF_REMOTE] = (PetscMemTypeNVSHMEM(leafmtype) && sf->leafcontig[PETSCSF_REMOTE]) ? PETSC_TRUE : PETSC_FALSE;
694: }
695: } else { /* PETSCSF_FETCH */
696: rootdirect[PETSCSF_REMOTE] = PETSC_FALSE; /* FETCH always needs a separate rootbuf */
697: leafdirect[PETSCSF_REMOTE] = PETSC_FALSE; /* We also force allocating a separate leafbuf so that leafdata and leafupdate can share mpi requests */
698: }
700: /* Look for free nvshmem links in cache */
701: for (p=&bas->avail; (link=*p); p=&link->next) {
702: if (link->use_nvshmem) {
703: MPIPetsc_Type_compare(unit,link->unit,&match);
704: if (match) {
705: *p = link->next; /* Remove from available list */
706: goto found;
707: }
708: }
709: }
710: PetscNew(&link);
711: PetscSFLinkSetUp_Host(sf,link,unit); /* Compute link->unitbytes, dup link->unit etc. */
712: if (sf->backend == PETSCSF_BACKEND_CUDA) PetscSFLinkSetUp_CUDA(sf,link,unit); /* Setup pack routines, streams etc */
713: #if defined(PETSC_HAVE_KOKKOS)
714: else if (sf->backend == PETSCSF_BACKEND_KOKKOS) PetscSFLinkSetUp_Kokkos(sf,link,unit);
715: #endif
717: link->rootdirect[PETSCSF_LOCAL] = PETSC_TRUE; /* For the local part we directly use root/leafdata */
718: link->leafdirect[PETSCSF_LOCAL] = PETSC_TRUE;
720: /* Init signals to zero */
721: if (!link->rootSendSig) PetscNvshmemCalloc(bas->nRemoteLeafRanksMax*sizeof(uint64_t),(void**)&link->rootSendSig);
722: if (!link->rootRecvSig) PetscNvshmemCalloc(bas->nRemoteLeafRanksMax*sizeof(uint64_t),(void**)&link->rootRecvSig);
723: if (!link->leafSendSig) PetscNvshmemCalloc(sf->nRemoteRootRanksMax*sizeof(uint64_t),(void**)&link->leafSendSig);
724: if (!link->leafRecvSig) PetscNvshmemCalloc(sf->nRemoteRootRanksMax*sizeof(uint64_t),(void**)&link->leafRecvSig);
726: link->use_nvshmem = PETSC_TRUE;
727: link->rootmtype = PETSC_MEMTYPE_DEVICE; /* Only need 0/1-based mtype from now on */
728: link->leafmtype = PETSC_MEMTYPE_DEVICE;
729: /* Overwrite some function pointers set by PetscSFLinkSetUp_CUDA */
730: link->Destroy = PetscSFLinkDestroy_NVSHMEM;
731: if (sf->use_nvshmem_get) { /* get-based protocol */
732: link->PrePack = PetscSFLinkWaitSignalsOfCompletionOfGettingData_NVSHMEM;
733: link->StartCommunication = PetscSFLinkGetDataBegin_NVSHMEM;
734: link->FinishCommunication = PetscSFLinkGetDataEnd_NVSHMEM;
735: } else { /* put-based protocol */
736: link->StartCommunication = PetscSFLinkPutDataBegin_NVSHMEM;
737: link->FinishCommunication = PetscSFLinkPutDataEnd_NVSHMEM;
738: link->PostUnpack = PetscSFLinkSendSignalsToAllowPuttingData_NVSHMEM;
739: }
741: cudaDeviceGetStreamPriorityRange(NULL,&greatestPriority);
742: cudaStreamCreateWithPriority(&link->remoteCommStream,cudaStreamNonBlocking,greatestPriority);
744: cudaEventCreateWithFlags(&link->dataReady,cudaEventDisableTiming);
745: cudaEventCreateWithFlags(&link->endRemoteComm,cudaEventDisableTiming);
747: found:
748: if (rootdirect[PETSCSF_REMOTE]) {
749: link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE] = (char*)rootdata + bas->rootstart[PETSCSF_REMOTE]*link->unitbytes;
750: } else {
751: if (!link->rootbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]) {
752: PetscNvshmemMalloc(bas->rootbuflen_rmax*link->unitbytes,(void**)&link->rootbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]);
753: }
754: link->rootbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE] = link->rootbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE];
755: }
757: if (leafdirect[PETSCSF_REMOTE]) {
758: link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE] = (char*)leafdata + sf->leafstart[PETSCSF_REMOTE]*link->unitbytes;
759: } else {
760: if (!link->leafbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]) {
761: PetscNvshmemMalloc(sf->leafbuflen_rmax*link->unitbytes,(void**)&link->leafbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE]);
762: }
763: link->leafbuf[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE] = link->leafbuf_alloc[PETSCSF_REMOTE][PETSC_MEMTYPE_DEVICE];
764: }
766: link->rootdirect[PETSCSF_REMOTE] = rootdirect[PETSCSF_REMOTE];
767: link->leafdirect[PETSCSF_REMOTE] = leafdirect[PETSCSF_REMOTE];
768: link->rootdata = rootdata; /* root/leafdata are keys to look up links in PetscSFXxxEnd */
769: link->leafdata = leafdata;
770: link->next = bas->inuse;
771: bas->inuse = link;
772: *mylink = link;
773: return 0;
774: }
776: #if defined(PETSC_USE_REAL_SINGLE)
777: PetscErrorCode PetscNvshmemSum(PetscInt count,float *dst,const float *src)
778: {
779: PetscMPIInt num; /* Assume nvshmem's int is MPI's int */
781: PetscMPIIntCast(count,&num);
782: nvshmemx_float_sum_reduce_on_stream(NVSHMEM_TEAM_WORLD,dst,src,num,PetscDefaultCudaStream);
783: return 0;
784: }
786: PetscErrorCode PetscNvshmemMax(PetscInt count,float *dst,const float *src)
787: {
788: PetscMPIInt num;
790: PetscMPIIntCast(count,&num);
791: nvshmemx_float_max_reduce_on_stream(NVSHMEM_TEAM_WORLD,dst,src,num,PetscDefaultCudaStream);
792: return 0;
793: }
794: #elif defined(PETSC_USE_REAL_DOUBLE)
795: PetscErrorCode PetscNvshmemSum(PetscInt count,double *dst,const double *src)
796: {
797: PetscMPIInt num;
799: PetscMPIIntCast(count,&num);
800: nvshmemx_double_sum_reduce_on_stream(NVSHMEM_TEAM_WORLD,dst,src,num,PetscDefaultCudaStream);
801: return 0;
802: }
804: PetscErrorCode PetscNvshmemMax(PetscInt count,double *dst,const double *src)
805: {
806: PetscMPIInt num;
808: PetscMPIIntCast(count,&num);
809: nvshmemx_double_max_reduce_on_stream(NVSHMEM_TEAM_WORLD,dst,src,num,PetscDefaultCudaStream);
810: return 0;
811: }
812: #endif
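/* Annotation (not in the original source): an illustrative call sequence for the reductions above, assuming
   PETSc was configured with real single- or double-precision scalars and that dst/src are symmetric
   allocations (e.g. from PetscNvshmemMalloc()) of at least <count> entries:

     PetscNvshmemSum(n,dst,src);   // dst[i] = sum over all PEs of src[i], enqueued on PetscDefaultCudaStream
     PetscNvshmemMax(n,dst,src);   // dst[i] = max over all PEs of src[i], enqueued on PetscDefaultCudaStream

   Both are team-wide collectives over NVSHMEM_TEAM_WORLD, so every PE must call them. */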