Actual source code: mpiptap.c

petsc-3.4.2 2013-07-02
  2: /*
  3:   Defines projective product routines where A is an MPIAIJ matrix
  4:           C = P^T * A * P
  5: */
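      /*
        The product is formed in two phases: MatPtAPSymbolic_MPIAIJ_MPIAIJ() builds the
        nonzero structure of C together with the communication pattern needed to merge
        off-process contributions, and MatPtAPNumeric_MPIAIJ_MPIAIJ() fills in the values.
        The supporting data (Mat_PtAPMPI, Mat_Merge_SeqsToMPI) is attached to C so that
        the numeric phase can be repeated cheaply when A changes values but not structure.
      */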

  7: #include <../src/mat/impls/aij/seq/aij.h>   /*I "petscmat.h" I*/
  8: #include <../src/mat/utils/freespace.h>
  9: #include <../src/mat/impls/aij/mpi/mpiaij.h>
 10: #include <petscbt.h>
 11: #include <petsctime.h>

 13: /* #define PTAP_PROFILE */
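      /* When PTAP_PROFILE is defined, rank 1 prints a per-phase timing breakdown at the
         end of the symbolic and numeric routines below */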

 15: extern PetscErrorCode MatDestroy_MPIAIJ(Mat);
 18: PetscErrorCode MatDestroy_MPIAIJ_PtAP(Mat A)
 19: {
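        /* Free the PtAP support data attached by the symbolic product, then hand the
           matrix to the original destroy routine saved in merge->destroy */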
 21:   Mat_MPIAIJ     *a   =(Mat_MPIAIJ*)A->data;
 22:   Mat_PtAPMPI    *ptap=a->ptap;

 25:   if (ptap) {
 26:     Mat_Merge_SeqsToMPI *merge=ptap->merge;
 27:     PetscFree2(ptap->startsj_s,ptap->startsj_r);
 28:     PetscFree(ptap->bufa);
 29:     MatDestroy(&ptap->P_loc);
 30:     MatDestroy(&ptap->P_oth);
 31:     MatDestroy(&ptap->A_loc); /* used by MatTransposeMatMult() */
 32:     if (ptap->api) {PetscFree(ptap->api);}
 33:     if (ptap->apj) {PetscFree(ptap->apj);}
 34:     if (ptap->apa) {PetscFree(ptap->apa);}
 35:     if (merge) {
 36:       PetscFree(merge->id_r);
 37:       PetscFree(merge->len_s);
 38:       PetscFree(merge->len_r);
 39:       PetscFree(merge->bi);
 40:       PetscFree(merge->bj);
 41:       PetscFree(merge->buf_ri[0]);
 42:       PetscFree(merge->buf_ri);
 43:       PetscFree(merge->buf_rj[0]);
 44:       PetscFree(merge->buf_rj);
 45:       PetscFree(merge->coi);
 46:       PetscFree(merge->coj);
 47:       PetscFree(merge->owners_co);
 48:       PetscLayoutDestroy(&merge->rowmap);
 49:       merge->destroy(A);
 50:       PetscFree(ptap->merge);
 51:     }
 52:     PetscFree(ptap);
 53:   }
 54:   return(0);
 55: }

 59: PetscErrorCode MatDuplicate_MPIAIJ_MatPtAP(Mat A, MatDuplicateOption op, Mat *M)
 60: {
 61:   PetscErrorCode      ierr;
 62:   Mat_MPIAIJ          *a     = (Mat_MPIAIJ*)A->data;
 63:   Mat_PtAPMPI         *ptap  = a->ptap;
 64:   Mat_Merge_SeqsToMPI *merge = ptap->merge;

 67:   (*merge->duplicate)(A,op,M);

 69:   (*M)->ops->destroy   = merge->destroy;
 70:   (*M)->ops->duplicate = merge->duplicate;
 71:   return(0);
 72: }

 76: PetscErrorCode MatPtAP_MPIAIJ_MPIAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
 77: {

 81:   if (scall == MAT_INITIAL_MATRIX) {
 82:     PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);
 83:     MatPtAPSymbolic_MPIAIJ_MPIAIJ(A,P,fill,C);
 84:     PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);
 85:   }
 86:   PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);
 87:   MatPtAPNumeric_MPIAIJ_MPIAIJ(A,P,*C);
 88:   PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);
 89:   return(0);
 90: }
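      /*
         Minimal calling sequence for this kernel (a sketch only, assuming A and P are
         assembled MATMPIAIJ matrices with compatible layouts; error checking omitted):

             Mat C;
             MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C);    symbolic + numeric product
             ...                                        change values (not structure) of A
             MatPtAP(A,P,MAT_REUSE_MATRIX,2.0,&C);      numeric product only
             MatDestroy(&C);

         With MAT_REUSE_MATRIX the symbolic data attached to C is reused and only
         MatPtAPNumeric_MPIAIJ_MPIAIJ() runs; the fill argument (2.0 here is just an
         example) is an estimate used only by the symbolic phase.
      */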

 94: PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
 95: {
 96:   PetscErrorCode      ierr;
 97:   Mat                 Cmpi;
 98:   Mat_PtAPMPI         *ptap;
 99:   PetscFreeSpaceList  free_space=NULL,current_space=NULL;
100:   Mat_MPIAIJ          *a        =(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
101:   Mat_SeqAIJ          *ad       =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
102:   Mat_SeqAIJ          *p_loc,*p_oth;
103:   PetscInt            *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pdti,*pdtj,*poti,*potj,*ptJ;
104:   PetscInt            *adi=ad->i,*aj,*aoi=ao->i,nnz;
105:   PetscInt            *lnk,*owners_co,*coi,*coj,i,k,pnz,row;
106:   PetscInt            am=A->rmap->n,pN=P->cmap->N,pm=P->rmap->n,pn=P->cmap->n;
107:   PetscBT             lnkbt;
108:   MPI_Comm            comm;
109:   PetscMPIInt         size,rank,tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0;
110:   PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
111:   PetscInt            len,proc,*dnz,*onz,*owners;
112:   PetscInt            nzi,*pti,*ptj;
113:   PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
114:   MPI_Request         *swaits,*rwaits;
115:   MPI_Status          *sstatus,rstatus;
116:   Mat_Merge_SeqsToMPI *merge;
117:   PetscInt            *api,*apj,*Jptr,apnz,*prmap=p->garray,pon,nspacedouble=0,j,ap_rmax=0;
118:   PetscReal           afill=1.0,afill_tmp;
119:   PetscInt            rmax;
120: #if defined(PTAP_PROFILE)
121:   PetscLogDouble t0,t1,t2,t3,t4;
122: #endif

125:   PetscObjectGetComm((PetscObject)A,&comm);
126: #if defined(PTAP_PROFILE)
127:   PetscTime(&t0);
128: #endif

130:   /* check if matrix local sizes are compatible */
131:   if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) {
132:     SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D, %D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
133:   }
134:   if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) {
135:     SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D, %D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);
136:   }

138:   MPI_Comm_size(comm,&size);
139:   MPI_Comm_rank(comm,&rank);

 141:   /* create struct Mat_PtAPMPI and attach it to C later */
142:   PetscNew(Mat_PtAPMPI,&ptap);
143:   PetscNew(Mat_Merge_SeqsToMPI,&merge);
144:   ptap->merge = merge;
145:   ptap->reuse = MAT_INITIAL_MATRIX;

147:   /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
148:   MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);

150:   /* get P_loc by taking all local rows of P */
151:   MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);

153:   p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
154:   p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
155:   pi_loc = p_loc->i; pj_loc = p_loc->j;
156:   pi_oth = p_oth->i; pj_oth = p_oth->j;
157: #if defined(PTAP_PROFILE)
158:   PetscTime(&t1);
159: #endif

161:   /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
162:   /*-------------------------------------------------------------------*/
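        /* For each local row i of A, the column indices of the P rows selected by the
           diagonal block (rows of P_loc) and by the off-diagonal block (rows of P_oth)
           are merged into the sorted linked list lnk; api[] and apj[] accumulate the row
           pointers and column indices of AP in compressed row format */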
163:   PetscMalloc((am+1)*sizeof(PetscInt),&api);
164:   api[0] = 0;

166:   /* create and initialize a linked list */
167:   PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);

 169:   /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) -- caused OOM for ex56 with np=8k on Intrepid! */
170:   PetscFreeSpaceGet((PetscInt)(fill*(adi[am]+aoi[am]+pi_loc[pm])),&free_space);

172:   current_space = free_space;

174:   for (i=0; i<am; i++) {
175:     /* diagonal portion of A */
176:     nzi = adi[i+1] - adi[i];
177:     aj  = ad->j + adi[i];
178:     for (j=0; j<nzi; j++) {
179:       row  = aj[j];
180:       pnz  = pi_loc[row+1] - pi_loc[row];
181:       Jptr = pj_loc + pi_loc[row];
182:       /* add non-zero cols of P into the sorted linked list lnk */
183:       PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);
184:     }
185:     /* off-diagonal portion of A */
186:     nzi = aoi[i+1] - aoi[i];
187:     aj  = ao->j + aoi[i];
188:     for (j=0; j<nzi; j++) {
189:       row  = aj[j];
190:       pnz  = pi_oth[row+1] - pi_oth[row];
191:       Jptr = pj_oth + pi_oth[row];
192:       PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);
193:     }
194:     apnz     = lnk[0];
195:     api[i+1] = api[i] + apnz;
196:     if (ap_rmax < apnz) ap_rmax = apnz;

198:     /* if free space is not available, double the total space in the list */
199:     if (current_space->local_remaining<apnz) {
200:       PetscFreeSpaceGet(apnz+current_space->total_array_size,&current_space);
201:       nspacedouble++;
202:     }

204:     /* Copy data into free space, then initialize lnk */
205:     PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);

207:     current_space->array           += apnz;
208:     current_space->local_used      += apnz;
209:     current_space->local_remaining -= apnz;
210:   }

212:   /* Allocate space for apj, initialize apj, and */
213:   /* destroy list of free space and other temporary array(s) */
214:   PetscMalloc((api[am]+1)*sizeof(PetscInt),&apj);
215:   PetscFreeSpaceContiguous(&free_space,apj);
216:   afill_tmp = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1);
217:   if (afill_tmp > afill) afill = afill_tmp;

219: #if defined(PTAP_PROFILE)
220:   PetscTime(&t2);
221: #endif

223:   /* determine symbolic Co=(p->B)^T*AP - send to others */
224:   /*----------------------------------------------------*/
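        /* Co holds the rows of P^T*AP owned by other processes: each of the
           pon = (p->B)->cmap->n columns of the off-diagonal block of P corresponds to a
           row of C owned elsewhere, so its structure (and later its values) is sent to
           the owning process and merged there */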
225:   MatGetSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);

227:   /* then, compute symbolic Co = (p->B)^T*AP */
228:   pon    = (p->B)->cmap->n; /* total num of rows to be sent to other processors
229:                          >= (num of nonzero rows of C_seq) - pn */
230:   PetscMalloc((pon+1)*sizeof(PetscInt),&coi);
231:   coi[0] = 0;

233:   /* set initial free space to be fill*(nnz(p->B) + nnz(AP)) */
234:   nnz           = fill*(poti[pon] + api[am]);
235:   PetscFreeSpaceGet(nnz,&free_space);
236:   current_space = free_space;

238:   for (i=0; i<pon; i++) {
239:     pnz = poti[i+1] - poti[i];
240:     ptJ = potj + poti[i];
241:     for (j=0; j<pnz; j++) {
242:       row  = ptJ[j]; /* row of AP == col of Pot */
243:       apnz = api[row+1] - api[row];
244:       Jptr = apj + api[row];
245:       /* add non-zero cols of AP into the sorted linked list lnk */
246:       PetscLLCondensedAddSorted(apnz,Jptr,lnk,lnkbt);
247:     }
248:     nnz = lnk[0];

250:     /* If free space is not available, double the total space in the list */
251:     if (current_space->local_remaining<nnz) {
252:       PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);
253:       nspacedouble++;
254:     }

 256:     /* Copy data into free space, and zero out dense rows */
257:     PetscLLCondensedClean(pN,nnz,current_space->array,lnk,lnkbt);

259:     current_space->array           += nnz;
260:     current_space->local_used      += nnz;
261:     current_space->local_remaining -= nnz;

263:     coi[i+1] = coi[i] + nnz;
264:   }
265:   PetscMalloc((coi[pon]+1)*sizeof(PetscInt),&coj);
266:   PetscFreeSpaceContiguous(&free_space,coj);
267:   afill_tmp = (PetscReal)coi[pon]/(poti[pon] + api[am]+1);
268:   if (afill_tmp > afill) afill = afill_tmp;
269:   MatRestoreSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);

271:   /* send j-array (coj) of Co to other processors */
272:   /*----------------------------------------------*/
273:   /* determine row ownership */
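        /* rows of C are distributed like the columns of P: this process owns pn rows */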
274:   PetscLayoutCreate(comm,&merge->rowmap);
275:   merge->rowmap->n  = pn;
276:   merge->rowmap->bs = 1;

278:   PetscLayoutSetUp(merge->rowmap);
279:   owners = merge->rowmap->range;

281:   /* determine the number of messages to send, their lengths */
282:   PetscMalloc2(size,PetscMPIInt,&len_si,size,MPI_Status,&sstatus);
283:   PetscMemzero(len_si,size*sizeof(PetscMPIInt));
284:   PetscMalloc(size*sizeof(PetscMPIInt),&merge->len_s);

286:   len_s        = merge->len_s;
287:   merge->nsend = 0;

289:   PetscMalloc((size+2)*sizeof(PetscInt),&owners_co);
290:   PetscMemzero(len_s,size*sizeof(PetscMPIInt));

292:   proc = 0;
293:   for (i=0; i<pon; i++) {
294:     while (prmap[i] >= owners[proc+1]) proc++;
295:     len_si[proc]++;  /* num of rows in Co to be sent to [proc] */
296:     len_s[proc] += coi[i+1] - coi[i];
297:   }

299:   len          = 0; /* max length of buf_si[] */
300:   owners_co[0] = 0;
301:   for (proc=0; proc<size; proc++) {
302:     owners_co[proc+1] = owners_co[proc] + len_si[proc];
303:     if (len_si[proc]) {
304:       merge->nsend++;
305:       len_si[proc] = 2*(len_si[proc] + 1);
306:       len         += len_si[proc];
307:     }
308:   }

310:   /* determine the number and length of messages to receive for coi and coj  */
311:   PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);
312:   PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);

314:   /* post the Irecv and Isend of coj */
315:   PetscCommGetNewTag(comm,&tagj);
316:   PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);
317:   PetscMalloc((merge->nsend+1)*sizeof(MPI_Request),&swaits);
318:   for (proc=0, k=0; proc<size; proc++) {
319:     if (!len_s[proc]) continue;
320:     i    = owners_co[proc];
321:     MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);
322:     k++;
323:   }

325:   /* receives and sends of coj are complete */
326:   for (i=0; i<merge->nrecv; i++) {
327:     MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);
328:   }
329:   PetscFree(rwaits);
330:   if (merge->nsend) {MPI_Waitall(merge->nsend,swaits,sstatus);}

332:   /* send and recv coi */
333:   /*-------------------*/
334:   PetscCommGetNewTag(comm,&tagi);
335:   PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);
336:   PetscMalloc((len+1)*sizeof(PetscInt),&buf_s);
337:   buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
338:   for (proc=0,k=0; proc<size; proc++) {
339:     if (!len_s[proc]) continue;
340:     /* form outgoing message for i-structure:
341:          buf_si[0]:                 nrows to be sent
 342:                [1:nrows]:           row index (local to the receiving process)
343:                [nrows+1:2*nrows+1]: i-structure index
344:     */
345:     /*-------------------------------------------*/
346:     nrows       = len_si[proc]/2 - 1;
347:     buf_si_i    = buf_si + nrows+1;
348:     buf_si[0]   = nrows;
349:     buf_si_i[0] = 0;
350:     nrows       = 0;
351:     for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
352:       nzi = coi[i+1] - coi[i];

354:       buf_si_i[nrows+1] = buf_si_i[nrows] + nzi; /* i-structure */
355:       buf_si[nrows+1]   = prmap[i] -owners[proc]; /* local row index */
356:       nrows++;
357:     }
358:     MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);
359:     k++;
360:     buf_si += len_si[proc];
361:   }
362:   i = merge->nrecv;
363:   while (i--) {
364:     MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);
365:   }
366:   PetscFree(rwaits);
367:   if (merge->nsend) {MPI_Waitall(merge->nsend,swaits,sstatus);}

369:   PetscFree2(len_si,sstatus);
370:   PetscFree(len_ri);
371:   PetscFree(swaits);
372:   PetscFree(buf_s);

374: #if defined(PTAP_PROFILE)
375:   PetscTime(&t3);
376: #endif

378:   /* compute the local portion of C (mpi mat) */
379:   /*------------------------------------------*/
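        /* the locally owned rows of C are built from Pd^T*AP (Pd = diagonal block of P)
           merged with the i/j structures just received from the other processes; the
           resulting row lengths feed MatPreallocateSet() so that Cmpi can be preallocated */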
380:   MatGetSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);

382:   /* allocate pti array and free space for accumulating nonzero column info */
383:   PetscMalloc((pn+1)*sizeof(PetscInt),&pti);
384:   pti[0] = 0;

386:   /* set initial free space to be fill*(nnz(P) + nnz(AP)) */
387:   nnz           = fill*(pi_loc[pm] + api[am]);
388:   PetscFreeSpaceGet(nnz,&free_space);
389:   current_space = free_space;

391:   PetscMalloc3(merge->nrecv,PetscInt**,&buf_ri_k,merge->nrecv,PetscInt*,&nextrow,merge->nrecv,PetscInt*,&nextci);
392:   for (k=0; k<merge->nrecv; k++) {
 393:     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
 394:     nrows       = *buf_ri_k[k];
 395:     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th received i-structure */
 396:     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
397:   }
398:   MatPreallocateInitialize(comm,pn,pn,dnz,onz);
399:   rmax = 0;
400:   for (i=0; i<pn; i++) {
401:     /* add pdt[i,:]*AP into lnk */
402:     pnz = pdti[i+1] - pdti[i];
403:     ptJ = pdtj + pdti[i];
404:     for (j=0; j<pnz; j++) {
405:       row  = ptJ[j];  /* row of AP == col of Pt */
406:       apnz = api[row+1] - api[row];
407:       Jptr = apj + api[row];
408:       /* add non-zero cols of AP into the sorted linked list lnk */
409:       PetscLLCondensedAddSorted(apnz,Jptr,lnk,lnkbt);
410:     }

412:     /* add received col data into lnk */
413:     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
414:       if (i == *nextrow[k]) { /* i-th row */
415:         nzi  = *(nextci[k]+1) - *nextci[k];
416:         Jptr = buf_rj[k] + *nextci[k];
417:         PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);
418:         nextrow[k]++; nextci[k]++;
419:       }
420:     }
421:     nnz = lnk[0];

423:     /* if free space is not available, make more free space */
424:     if (current_space->local_remaining<nnz) {
425:       PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);
426:       nspacedouble++;
427:     }
428:     /* copy data into free space, then initialize lnk */
429:     PetscLLCondensedClean(pN,nnz,current_space->array,lnk,lnkbt);
430:     MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);

432:     current_space->array           += nnz;
433:     current_space->local_used      += nnz;
434:     current_space->local_remaining -= nnz;

436:     pti[i+1] = pti[i] + nnz;
437:     if (nnz > rmax) rmax = nnz;
438:   }
439:   MatRestoreSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);
440:   PetscFree3(buf_ri_k,nextrow,nextci);

442:   PetscMalloc((pti[pn]+1)*sizeof(PetscInt),&ptj);
443:   PetscFreeSpaceContiguous(&free_space,ptj);
444:   afill_tmp = (PetscReal)pti[pn]/(pi_loc[pm] + api[am]+1);
445:   if (afill_tmp > afill) afill = afill_tmp;
446:   PetscLLDestroy(lnk,lnkbt);

448:   /* create symbolic parallel matrix Cmpi */
449:   /*--------------------------------------*/
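        /* only the structure of Cmpi is set here: dnz/onz gathered above preallocate its
           diagonal and off-diagonal blocks, and the values are filled in by the numeric phase */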
450:   MatCreate(comm,&Cmpi);
451:   MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);
452:   MatSetBlockSizes(Cmpi,P->cmap->bs,P->cmap->bs);
453:   MatSetType(Cmpi,MATMPIAIJ);
454:   MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);
455:   MatPreallocateFinalize(dnz,onz);

457:   merge->bi        = pti;      /* Cseq->i */
458:   merge->bj        = ptj;      /* Cseq->j */
459:   merge->coi       = coi;      /* Co->i   */
460:   merge->coj       = coj;      /* Co->j   */
461:   merge->buf_ri    = buf_ri;
462:   merge->buf_rj    = buf_rj;
463:   merge->owners_co = owners_co;
464:   merge->destroy   = Cmpi->ops->destroy;
465:   merge->duplicate = Cmpi->ops->duplicate;

467:   /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
468:   Cmpi->assembled      = PETSC_FALSE;
469:   Cmpi->ops->destroy   = MatDestroy_MPIAIJ_PtAP;
470:   Cmpi->ops->duplicate = MatDuplicate_MPIAIJ_MatPtAP;

472:   /* attach the supporting struct to Cmpi for reuse */
473:   c           = (Mat_MPIAIJ*)Cmpi->data;
474:   c->ptap     = ptap;
475:   ptap->api   = api;
476:   ptap->apj   = apj;
477:   ptap->rmax  = ap_rmax;
478:   *C          = Cmpi;

 480:   /* flag 'scalable' determines which implementation is used:
 481:        0: do dense axpy in MatPtAPNumeric() - fast, but requires storage of a nonscalable dense array apa;
 482:        1: do sparse axpy in MatPtAPNumeric() - may be slower, but uses a sparse array apa */
483:   /* set default scalable */
484:   ptap->scalable = PETSC_TRUE;

486:   PetscOptionsGetBool(((PetscObject)Cmpi)->prefix,"-matptap_scalable",&ptap->scalable,NULL);
487:   if (!ptap->scalable) {  /* Do dense axpy */
488:     PetscMalloc(pN*sizeof(PetscScalar),&ptap->apa);
489:     PetscMemzero(ptap->apa,pN*sizeof(PetscScalar));
490:   } else {
491:     PetscMalloc((ap_rmax+1)*sizeof(PetscScalar),&ptap->apa);
492:     PetscMemzero(ptap->apa,ap_rmax*sizeof(PetscScalar));
493:   }

495: #if defined(PTAP_PROFILE)
496:   PetscTime(&t4);
497:   if (rank==1) PetscPrintf(MPI_COMM_SELF,"  [%d] PtAPSymbolic %g/P + %g/AP + %g/comm + %g/PtAP = %g\n",rank,t1-t0,t2-t1,t3-t2,t4-t3,t4-t0);
498: #endif

500: #if defined(PETSC_USE_INFO)
501:   if (pti[pn] != 0) {
502:     PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %G needed %G.\n",nspacedouble,fill,afill);
503:     PetscInfo1(Cmpi,"Use MatPtAP(A,P,MatReuse,%G,&C) for best performance.\n",afill);
504:   } else {
505:     PetscInfo(Cmpi,"Empty matrix product\n");
506:   }
507: #endif
508:   return(0);
509: }

513: PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
514: {
515:   PetscErrorCode      ierr;
516:   Mat_MPIAIJ          *a =(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
517:   Mat_SeqAIJ          *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
518:   Mat_SeqAIJ          *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
519:   Mat_SeqAIJ          *p_loc,*p_oth;
520:   Mat_PtAPMPI         *ptap;
521:   Mat_Merge_SeqsToMPI *merge;
522:   PetscInt            *adi=ad->i,*aoi=ao->i,*adj,*aoj,*apJ,nextp;
523:   PetscInt            *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pJ,*pj;
524:   PetscInt            i,j,k,anz,pnz,apnz,nextap,row,*cj;
525:   MatScalar           *ada,*aoa,*apa,*pa,*ca,*pa_loc,*pa_oth,valtmp;
526:   PetscInt            am  =A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n;
527:   MPI_Comm            comm;
528:   PetscMPIInt         size,rank,taga,*len_s;
529:   PetscInt            *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci;
530:   PetscInt            **buf_ri,**buf_rj;
531:   PetscInt            cnz=0,*bj_i,*bi,*bj,bnz,nextcj;  /* bi,bj,ba: local array of C(mpi mat) */
532:   MPI_Request         *s_waits,*r_waits;
533:   MPI_Status          *status;
534:   MatScalar           **abuf_r,*ba_i,*pA,*coa,*ba;
535:   PetscInt            *api,*apj,*coi,*coj;
536:   PetscInt            *poJ=po->j,*pdJ=pd->j,pcstart=P->cmap->rstart,pcend=P->cmap->rend;
537:   PetscBool           scalable;
538: #if defined(PTAP_PROFILE)
539:   PetscLogDouble t0,t1,t2,t3,t4,et2_AP=0.0,et2_PtAP=0.0,t2_0,t2_1,t2_2;
540: #endif

543:   PetscObjectGetComm((PetscObject)C,&comm);
544: #if defined(PTAP_PROFILE)
545:   PetscTime(&t0);
546: #endif
547:   MPI_Comm_size(comm,&size);
548:   MPI_Comm_rank(comm,&rank);

550:   ptap = c->ptap;
551:   if (!ptap) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_INCOMP,"MatPtAP() has not been called to create matrix C yet, cannot use MAT_REUSE_MATRIX");
552:   merge    = ptap->merge;
553:   apa      = ptap->apa;
554:   scalable = ptap->scalable;

556:   /* 1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
557:   /*--------------------------------------------------*/
558:   if (ptap->reuse == MAT_INITIAL_MATRIX) {
 559:     /* P_oth and P_loc were obtained in MatPtAPSymbolic(), skip calling MatGetBrowsOfAoCols() and MatMPIAIJGetLocalMat() */
560:     ptap->reuse = MAT_REUSE_MATRIX;
561:   } else { /* update numerical values of P_oth and P_loc */
562:     MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);
563:     MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);
564:   }
565: #if defined(PTAP_PROFILE)
566:   PetscTime(&t1);
567: #endif

569:   /* 2) compute numeric C_seq = P_loc^T*A_loc*P - dominating part */
570:   /*--------------------------------------------------------------*/
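        /* two variants follow, selected by ptap->scalable: the non-scalable one expands each
           row A[i,:]*P into a dense work array apa of length pN, while the scalable one keeps
           apa compressed to the nonzeros of that row (length at most ap_rmax) */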
571:   /* get data from symbolic products */
572:   p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
573:   p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
574:   pi_loc=p_loc->i; pj_loc=p_loc->j; pJ=pj_loc; pa_loc=p_loc->a;
575:   pi_oth=p_oth->i; pj_oth=p_oth->j; pa_oth=p_oth->a;

577:   coi  = merge->coi; coj = merge->coj;
578:   PetscMalloc((coi[pon]+1)*sizeof(MatScalar),&coa);
579:   PetscMemzero(coa,coi[pon]*sizeof(MatScalar));

581:   bi     = merge->bi; bj = merge->bj;
582:   owners = merge->rowmap->range;
583:   PetscMalloc((bi[cm]+1)*sizeof(MatScalar),&ba);  /* ba: Cseq->a */
584:   PetscMemzero(ba,bi[cm]*sizeof(MatScalar));

586:   api = ptap->api; apj = ptap->apj;

588:   if (!scalable) { /* Do dense axpy on apa (length of pN, stores A[i,:]*P) - nonscalable, but fast */
589:     PetscInfo(C,"Using non-scalable dense axpy\n");
590:     /*-----------------------------------------------------------------------------------------------------*/
591:     for (i=0; i<am; i++) {
592: #if defined(PTAP_PROFILE)
593:       PetscTime(&t2_0);
594: #endif
595:       /* 2-a) form i-th sparse row of A_loc*P = Ad*P_loc + Ao*P_oth */
596:       /*------------------------------------------------------------*/
597:       apJ = apj + api[i];

599:       /* diagonal portion of A */
600:       anz = adi[i+1] - adi[i];
601:       adj = ad->j + adi[i];
602:       ada = ad->a + adi[i];
603:       for (j=0; j<anz; j++) {
604:         row = adj[j];
605:         pnz = pi_loc[row+1] - pi_loc[row];
606:         pj  = pj_loc + pi_loc[row];
607:         pa  = pa_loc + pi_loc[row];

609:         /* perform dense axpy */
610:         valtmp = ada[j];
611:         for (k=0; k<pnz; k++) {
612:           apa[pj[k]] += valtmp*pa[k];
613:         }
614:         PetscLogFlops(2.0*pnz);
615:       }

617:       /* off-diagonal portion of A */
618:       anz = aoi[i+1] - aoi[i];
619:       aoj = ao->j + aoi[i];
620:       aoa = ao->a + aoi[i];
621:       for (j=0; j<anz; j++) {
622:         row = aoj[j];
623:         pnz = pi_oth[row+1] - pi_oth[row];
624:         pj  = pj_oth + pi_oth[row];
625:         pa  = pa_oth + pi_oth[row];

627:         /* perform dense axpy */
628:         valtmp = aoa[j];
629:         for (k=0; k<pnz; k++) {
630:           apa[pj[k]] += valtmp*pa[k];
631:         }
632:         PetscLogFlops(2.0*pnz);
633:       }
634: #if defined(PTAP_PROFILE)
635:       PetscTime(&t2_1);
636:       et2_AP += t2_1 - t2_0;
637: #endif

639:       /* 2-b) Compute Cseq = P_loc[i,:]^T*AP[i,:] using outer product */
640:       /*--------------------------------------------------------------*/
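            /* for every nonzero P(i,j), the contribution P(i,j)*AP[i,:] is added to row j of C:
               columns of the off-diagonal block po go into Co (coa, sent to the owner later),
               columns of the diagonal block pd go into the locally owned part Cd (ba) */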
641:       apnz = api[i+1] - api[i];
642:       /* put the value into Co=(p->B)^T*AP (off-diagonal part, send to others) */
643:       pnz = po->i[i+1] - po->i[i];
644:       poJ = po->j + po->i[i];
645:       pA  = po->a + po->i[i];
646:       for (j=0; j<pnz; j++) {
647:         row = poJ[j];
648:         cnz = coi[row+1] - coi[row];
649:         cj  = coj + coi[row];
650:         ca  = coa + coi[row];
651:         /* perform dense axpy */
652:         valtmp = pA[j];
653:         for (k=0; k<cnz; k++) {
654:           ca[k] += valtmp*apa[cj[k]];
655:         }
656:         PetscLogFlops(2.0*cnz);
657:       }

659:       /* put the value into Cd (diagonal part) */
660:       pnz = pd->i[i+1] - pd->i[i];
661:       pdJ = pd->j + pd->i[i];
662:       pA  = pd->a + pd->i[i];
663:       for (j=0; j<pnz; j++) {
664:         row = pdJ[j];
665:         cnz = bi[row+1] - bi[row];
666:         cj  = bj + bi[row];
667:         ca  = ba + bi[row];
668:         /* perform dense axpy */
669:         valtmp = pA[j];
670:         for (k=0; k<cnz; k++) {
671:           ca[k] += valtmp*apa[cj[k]];
672:         }
673:         PetscLogFlops(2.0*cnz);
674:       }

676:       /* zero the current row of A*P */
677:       for (k=0; k<apnz; k++) apa[apJ[k]] = 0.0;
678: #if defined(PTAP_PROFILE)
679:       PetscTime(&t2_2);
680:       et2_PtAP += t2_2 - t2_1;
681: #endif
682:     }
683:   } else { /* Do sparse axpy on apa (length of ap_rmax, stores A[i,:]*P) - scalable, but slower */
684:     PetscInfo(C,"Using scalable sparse axpy\n");
685:     /*-----------------------------------------------------------------------------------------*/
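            /* here apa is indexed by position within the current row's column list apJ
               (not by global column), so each update scans apJ and matches its entries
               against the column indices of the P row being added */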
686:     pA=pa_loc;
687:     for (i=0; i<am; i++) {
688: #if defined(PTAP_PROFILE)
689:       PetscTime(&t2_0);
690: #endif
691:       /* form i-th sparse row of A*P */
692:       apnz = api[i+1] - api[i];
693:       apJ  = apj + api[i];
694:       /* diagonal portion of A */
695:       anz = adi[i+1] - adi[i];
696:       adj = ad->j + adi[i];
697:       ada = ad->a + adi[i];
698:       for (j=0; j<anz; j++) {
699:         row    = adj[j];
700:         pnz    = pi_loc[row+1] - pi_loc[row];
701:         pj     = pj_loc + pi_loc[row];
702:         pa     = pa_loc + pi_loc[row];
703:         valtmp = ada[j];
704:         nextp  = 0;
705:         for (k=0; nextp<pnz; k++) {
706:           if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
707:             apa[k] += valtmp*pa[nextp++];
708:           }
709:         }
710:         PetscLogFlops(2.0*pnz);
711:       }
712:       /* off-diagonal portion of A */
713:       anz = aoi[i+1] - aoi[i];
714:       aoj = ao->j + aoi[i];
715:       aoa = ao->a + aoi[i];
716:       for (j=0; j<anz; j++) {
717:         row    = aoj[j];
718:         pnz    = pi_oth[row+1] - pi_oth[row];
719:         pj     = pj_oth + pi_oth[row];
720:         pa     = pa_oth + pi_oth[row];
721:         valtmp = aoa[j];
722:         nextp  = 0;
723:         for (k=0; nextp<pnz; k++) {
724:           if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
725:             apa[k] += valtmp*pa[nextp++];
726:           }
727:         }
728:         PetscLogFlops(2.0*pnz);
729:       }
730: #if defined(PTAP_PROFILE)
731:       PetscTime(&t2_1);
732:       et2_AP += t2_1 - t2_0;
733: #endif

735:       /* 2-b) Compute Cseq = P_loc[i,:]^T*AP[i,:] using outer product */
736:       /*--------------------------------------------------------------*/
737:       pnz = pi_loc[i+1] - pi_loc[i];
738:       pJ  = pj_loc + pi_loc[i];
739:       for (j=0; j<pnz; j++) {
740:         nextap = 0;
741:         row    = pJ[j]; /* global index */
742:         if (row < pcstart || row >=pcend) { /* put the value into Co */
743:           row = *poJ;
744:           cj  = coj + coi[row];
745:           ca  = coa + coi[row]; poJ++;
746:         } else {                            /* put the value into Cd */
747:           row = *pdJ;
748:           cj  = bj + bi[row];
749:           ca  = ba + bi[row]; pdJ++;
750:         }
751:         valtmp = pA[j];
752:         for (k=0; nextap<apnz; k++) {
753:           if (cj[k]==apJ[nextap]) ca[k] += valtmp*apa[nextap++];
754:         }
755:         PetscLogFlops(2.0*apnz);
756:       }
757:       pA += pnz;
758:       /* zero the current row info for A*P */
759:       PetscMemzero(apa,apnz*sizeof(MatScalar));
760: #if defined(PTAP_PROFILE)
761:       PetscTime(&t2_2);
762:       et2_PtAP += t2_2 - t2_1;
763: #endif
764:     }
765:   }
766: #if defined(PTAP_PROFILE)
767:   PetscTime(&t2);
768: #endif

770:   /* 3) send and recv matrix values coa */
771:   /*------------------------------------*/
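        /* the values of Co are shipped with the same message pattern (merge->len_s,
           merge->id_r, merge->len_r) that was established for the i/j structure in the
           symbolic phase, so no new handshaking is needed */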
772:   buf_ri = merge->buf_ri;
773:   buf_rj = merge->buf_rj;
774:   len_s  = merge->len_s;
775:   PetscCommGetNewTag(comm,&taga);
776:   PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);

778:   PetscMalloc2(merge->nsend+1,MPI_Request,&s_waits,size,MPI_Status,&status);
779:   for (proc=0,k=0; proc<size; proc++) {
780:     if (!len_s[proc]) continue;
781:     i    = merge->owners_co[proc];
782:     MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);
783:     k++;
784:   }
785:   if (merge->nrecv) {MPI_Waitall(merge->nrecv,r_waits,status);}
786:   if (merge->nsend) {MPI_Waitall(merge->nsend,s_waits,status);}

788:   PetscFree2(s_waits,status);
789:   PetscFree(r_waits);
790:   PetscFree(coa);
791: #if defined(PTAP_PROFILE)
792:   PetscTime(&t3);
793: #endif

795:   /* 4) insert local Cseq and received values into Cmpi */
796:   /*------------------------------------------------------*/
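        /* each locally owned row is assembled by merging the local contribution ba with the
           matching column entries from the received buffers abuf_r, then inserted into C
           with MatSetValues() and INSERT_VALUES */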
797:   PetscMalloc3(merge->nrecv,PetscInt**,&buf_ri_k,merge->nrecv,PetscInt*,&nextrow,merge->nrecv,PetscInt*,&nextci);
798:   for (k=0; k<merge->nrecv; k++) {
 799:     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
 800:     nrows       = *(buf_ri_k[k]);
 801:     nextrow[k]  = buf_ri_k[k]+1;  /* next row number of k-th received i-structure */
 802:     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
803:   }

805:   for (i=0; i<cm; i++) {
806:     row  = owners[rank] + i; /* global row index of C_seq */
807:     bj_i = bj + bi[i];  /* col indices of the i-th row of C */
808:     ba_i = ba + bi[i];
809:     bnz  = bi[i+1] - bi[i];
810:     /* add received vals into ba */
811:     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
812:       /* i-th row */
813:       if (i == *nextrow[k]) {
814:         cnz    = *(nextci[k]+1) - *nextci[k];
815:         cj     = buf_rj[k] + *(nextci[k]);
816:         ca     = abuf_r[k] + *(nextci[k]);
817:         nextcj = 0;
818:         for (j=0; nextcj<cnz; j++) {
819:           if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */
820:             ba_i[j] += ca[nextcj++];
821:           }
822:         }
823:         nextrow[k]++; nextci[k]++;
824:         PetscLogFlops(2.0*cnz);
825:       }
826:     }
827:     MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);
828:   }
829:   MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
830:   MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);

832:   PetscFree(ba);
833:   PetscFree(abuf_r[0]);
834:   PetscFree(abuf_r);
835:   PetscFree3(buf_ri_k,nextrow,nextci);
836: #if defined(PTAP_PROFILE)
837:   PetscTime(&t4);
838:   if (rank==1) PetscPrintf(MPI_COMM_SELF,"  [%d] PtAPNum %g/P + %g/PtAP( %g + %g ) + %g/comm + %g/Cloc = %g\n\n",rank,t1-t0,t2-t1,et2_AP,et2_PtAP,t3-t2,t4-t3,t4-t0);
839: #endif
840:   return(0);
841: }