Actual source code: ex183.c

petsc-3.7.4 2016-10-02
static char help[] =
  "Example of extracting an array of MPI submatrices from a given MPI matrix.\n"
  "This test can only be run in parallel.\n"
  "\n";

/*T
   Concepts: Mat^mat submatrix, parallel
   Processors: n
T*/

#include <petscmat.h>

int main(int argc, char **args)
{
  Mat             A,*submats;
  MPI_Comm        subcomm;
  PetscMPIInt     rank,size,subrank,subsize,color;
  PetscInt        m,n,N,bs,rstart,rend,i,j,k,total_subdomains,hash,nsubdomains=1;
  PetscInt        nis,*cols,gnsubdomains,gsubdomainnums[1],gsubdomainperm[1],s,gs;
  PetscInt        *rowindices,*colindices,idx,rep;
  PetscScalar     *vals;
  IS              rowis[1],colis[1];
  PetscViewer     viewer;
  PetscBool       permute_indices,flg;
  PetscErrorCode  ierr;

  ierr = PetscInitialize(&argc,&args,(char*)0,help);if (ierr) return ierr;
  MPI_Comm_size(PETSC_COMM_WORLD,&size);
  MPI_Comm_rank(PETSC_COMM_WORLD,&rank);

  PetscOptionsBegin(PETSC_COMM_WORLD,NULL,"ex183","Mat");
  m = 5;
  PetscOptionsInt("-m","Local matrix size","MatSetSizes",m,&m,&flg);
  total_subdomains = size-1;
  PetscOptionsInt("-total_subdomains","Number of submatrices n, where 0 < n <= comm size","MatGetSubMatricesMPI",total_subdomains,&total_subdomains,&flg);
  permute_indices = PETSC_FALSE;
  PetscOptionsBool("-permute_indices","Whether to permute indices before breaking them into subdomains","ISCreateGeneral",permute_indices,&permute_indices,&flg);
  hash = 7;
  PetscOptionsInt("-hash","Permutation factor, which has to be relatively prime to M = size*m (total matrix size)","ISCreateGeneral",hash,&hash,&flg);
  rep = 2;
  PetscOptionsInt("-rep","Number of times to carry out submatrix extractions; currently only 1 and 2 are supported",NULL,rep,&rep,&flg);
  PetscOptionsEnd();

  if (total_subdomains > size) SETERRQ2(PETSC_COMM_WORLD,PETSC_ERR_ARG_WRONG,"Number of subdomains %D must not exceed comm size %D",total_subdomains,size);
  if (total_subdomains < 1 || total_subdomains > size) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"number of subdomains must be > 0 and <= %D (comm size), got total_subdomains = %D",size,total_subdomains);
  if (rep != 1 && rep != 2) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid number of test repetitions: %D; must be 1 or 2",rep);

  viewer = PETSC_VIEWER_STDOUT_WORLD;
  /* Create a logically sparse, but effectively dense matrix for easy verification of submatrix extraction correctness. */
  MatCreate(PETSC_COMM_WORLD,&A);
  MatSetSizes(A,m,m,PETSC_DECIDE,PETSC_DECIDE);
  MatSetFromOptions(A);
  MatSetUp(A);
  MatGetSize(A,NULL,&N);
  MatGetLocalSize(A,NULL,&n);
  MatGetBlockSize(A,&bs);
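  /* Preallocate for every matrix type this example may be run with (via -mat_type);
     preallocation routines that do not match the actual type of A are no-ops, so calling all of them is safe. */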
  MatSeqAIJSetPreallocation(A,n,NULL);
  MatMPIAIJSetPreallocation(A,n,NULL,N-n,NULL);
  MatSeqBAIJSetPreallocation(A,bs,n/bs,NULL);
  MatMPIBAIJSetPreallocation(A,bs,n/bs,NULL,(N-n)/bs,NULL);
  MatSeqSBAIJSetPreallocation(A,bs,n/bs,NULL);
  MatMPISBAIJSetPreallocation(A,bs,n/bs,NULL,(N-n)/bs,NULL);

  PetscMalloc2(N,&cols,N,&vals);
  MatGetOwnershipRange(A,&rstart,&rend);
  for (j = 0; j < N; ++j) cols[j] = j;
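  /* Entry (i,j) is set to i*10000+j, so every stored value encodes its global row and column;
     this makes it easy to verify by inspection that each extracted submatrix picked up the right entries. */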
  for (i=rstart; i<rend; i++) {
    for (j=0;j<N;++j) {
      vals[j] = i*10000+j;
    }
    MatSetValues(A,1,&i,N,cols,vals,INSERT_VALUES);
  }
  PetscFree2(cols,vals);
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);

  PetscViewerASCIIPrintf(viewer,"Initial matrix:\n");
  MatView(A,viewer);


  /*
     Create subcomms and ISs so that each rank participates in one IS.
     The IS either coalesces adjacent rank indices (contiguous),
     or selects indices by scrambling them using a hash.
  */
  k = size/total_subdomains + (size%total_subdomains>0); /* There are up to k ranks to a color */
  color = rank/k;
  MPI_Comm_split(PETSC_COMM_WORLD,color,rank,&subcomm);
  MPI_Comm_size(subcomm,&subsize);
  MPI_Comm_rank(subcomm,&subrank);
  MatGetOwnershipRange(A,&rstart,&rend);
  nis = 1;
  PetscMalloc2(rend-rstart,&rowindices,rend-rstart,&colindices);

  for (j = rstart; j < rend; ++j) {
    if (permute_indices) {
      idx = (j*hash);
    } else {
      idx = j;
    }
    rowindices[j-rstart] = idx%N;
    colindices[j-rstart] = (idx+m)%N;
  }
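  /* For example, with the default m=5 on 2 ranks (N=10) and -permute_indices (hash=7): rank 0 owns global
     rows 0..4, so idx = {0,7,14,21,28} and its local contributions are rowindices = {0,7,4,1,8} and
     colindices = {5,2,9,6,3}; both ISs are sorted below.  Without -permute_indices each rank simply
     contributes its contiguous ownership range. */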
  ISCreateGeneral(subcomm,rend-rstart,rowindices,PETSC_COPY_VALUES,&rowis[0]);
  ISCreateGeneral(subcomm,rend-rstart,colindices,PETSC_COPY_VALUES,&colis[0]);
  ISSort(rowis[0]);
  ISSort(colis[0]);
  PetscFree2(rowindices,colindices);
  /*
    Now view the ISs.  To avoid deadlock when viewing a list of objects on different subcomms,
    we need to obtain the global numbers of our local objects and wait for the corresponding global
    number to be viewed.
  */
  PetscViewerASCIIPrintf(viewer,"Subdomains");
  if (permute_indices) {
    PetscViewerASCIIPrintf(viewer," (hash=%D)",hash);
  }
  PetscViewerASCIIPrintf(viewer,":\n");
  PetscViewerFlush(viewer);

  nsubdomains = 1;
  for (s = 0; s < nsubdomains; ++s) gsubdomainperm[s] = s;
  PetscObjectsListGetGlobalNumbering(PETSC_COMM_WORLD,1,(PetscObject*)rowis,&gnsubdomains,gsubdomainnums);
  PetscSortIntWithPermutation(nsubdomains,gsubdomainnums,gsubdomainperm);
  for (gs=0,s=0; gs < gnsubdomains;++gs) {
    if (s < nsubdomains) {
      PetscInt ss;
      ss = gsubdomainperm[s];
      if (gs == gsubdomainnums[ss]) { /* Global subdomain gs being viewed is my subdomain with local number ss. */
        PetscViewer subviewer = NULL;
        PetscViewerGetSubViewer(viewer,PetscObjectComm((PetscObject)rowis[ss]),&subviewer);
        PetscViewerASCIIPrintf(subviewer,"Row IS %D\n",gs);
        ISView(rowis[ss],subviewer);
        PetscViewerFlush(subviewer);
        PetscViewerASCIIPrintf(subviewer,"Col IS %D\n",gs);
        ISView(colis[ss],subviewer);
        PetscViewerRestoreSubViewer(viewer,PetscObjectComm((PetscObject)rowis[ss]),&subviewer);
        ++s;
      }
    }
    MPI_Barrier(PETSC_COMM_WORLD);
  }
  PetscViewerFlush(viewer);
  ISSort(rowis[0]);
  ISSort(colis[0]);
  nsubdomains = 1;
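  /* Extract one parallel (MPI) submatrix per subdomain; each submatrix lives on the subcommunicator
     of the corresponding ISs.  The extraction is collective on the communicator of A. */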
  MatGetSubMatricesMPI(A,nsubdomains,rowis,colis,MAT_INITIAL_MATRIX,&submats);
  /*
    Now view the matrices.  To avoid deadlock when viewing a list of objects on different subcomms,
    we need to obtain the global numbers of our local objects and wait for the corresponding global
    number to be viewed.
  */
  PetscViewerASCIIPrintf(viewer,"Submatrices (repetition 1):\n");
  for (s = 0; s < nsubdomains; ++s) gsubdomainperm[s] = s;
  PetscObjectsListGetGlobalNumbering(PETSC_COMM_WORLD,1,(PetscObject*)submats,&gnsubdomains,gsubdomainnums);
  PetscSortIntWithPermutation(nsubdomains,gsubdomainnums,gsubdomainperm);
  for (gs=0,s=0; gs < gnsubdomains;++gs) {
    if (s < nsubdomains) {
      PetscInt ss;
      ss = gsubdomainperm[s];
      if (gs == gsubdomainnums[ss]) { /* Global subdomain gs being viewed is my subdomain with local number ss. */
        PetscViewer subviewer = NULL;
        PetscViewerGetSubViewer(viewer,PetscObjectComm((PetscObject)submats[ss]),&subviewer);
        MatView(submats[ss],subviewer);
        PetscViewerRestoreSubViewer(viewer,PetscObjectComm((PetscObject)submats[ss]),&subviewer);
        ++s;
      }
    }
    MPI_Barrier(PETSC_COMM_WORLD);
  }
  PetscViewerFlush(viewer);
  if (rep == 1) goto cleanup;
  nsubdomains = 1;
  MatGetSubMatricesMPI(A,nsubdomains,rowis,colis,MAT_REUSE_MATRIX,&submats);
  /*
    Now view the matrices.  To avoid deadlock when viewing a list of objects on different subcomms,
    we need to obtain the global numbers of our local objects and wait for the corresponding global
    number to be viewed.
  */
  PetscViewerASCIIPrintf(viewer,"Submatrices (repetition 2):\n");
  for (s = 0; s < nsubdomains; ++s) gsubdomainperm[s] = s;
  PetscObjectsListGetGlobalNumbering(PETSC_COMM_WORLD,1,(PetscObject*)submats,&gnsubdomains,gsubdomainnums);
  PetscSortIntWithPermutation(nsubdomains,gsubdomainnums,gsubdomainperm);
  for (gs=0,s=0; gs < gnsubdomains;++gs) {
    if (s < nsubdomains) {
      PetscInt ss;
      ss = gsubdomainperm[s];
      if (gs == gsubdomainnums[ss]) { /* Global subdomain gs being viewed is my subdomain with local number ss. */
        PetscViewer subviewer = NULL;
        PetscViewerGetSubViewer(viewer,PetscObjectComm((PetscObject)submats[ss]),&subviewer);
        MatView(submats[ss],subviewer);
        PetscViewerRestoreSubViewer(viewer,PetscObjectComm((PetscObject)submats[ss]),&subviewer);
        ++s;
      }
    }
    MPI_Barrier(PETSC_COMM_WORLD);
  }
  cleanup:
  for (k=0;k<nsubdomains;++k) {
    MatDestroy(submats+k);
  }
  PetscFree(submats);
  for (k=0;k<nis;++k) {
    ISDestroy(rowis+k);
    ISDestroy(colis+k);
  }
  MatDestroy(&A);
  MPI_Comm_free(&subcomm);
  ierr = PetscFinalize();
  return ierr;
}
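Usage note: this example must be run on at least two MPI ranks (on one rank the default total_subdomains of size-1 fails the subdomain-count check). A typical invocation, assuming the executable has been built as ex183 in the usual PETSc example layout, is "mpiexec -n 3 ./ex183 -total_subdomains 2 -permute_indices", which splits PETSC_COMM_WORLD into two subcommunicators and extracts one MPI submatrix per subcommunicator from the scrambled row and column index sets; -m, -hash, and -rep adjust the local matrix size, the permutation factor, and the number of extraction passes (MAT_INITIAL_MATRIX followed by MAT_REUSE_MATRIX).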