00001 #include "laxfriedrichsmethodmpi.h"
00002 #include "netmpi.h"
00003 #include "fxyslabid.h"
00004 #include "numericmethods.h"
00005
00006 #define SEND_GHOSTCELL 201
00007 #define DEAD_ZONE_VALUE 202
00008
00009
// Builds the MPI send/receive index maps and buffers used to exchange
// ghost data with the left/right neighbour processes when the subdomain
// decomposition has NO overlap (mesh_overlap == 0).
//
// Left side (every rank except 0): the cells in the first slab
// [X0, X0+dx] are sent, while the receive map is built from the faces
// lying on the X0 plane (zero-width slab).
// NOTE(review): sends are built from CELL indices but receives from FACE
// indices — presumably the neighbour fills face-aligned ghost storage;
// confirm against NetMPI::exchangeDataRedBlack's expectations.
// Right side (every rank except the last) mirrors this at Q, the right
// end of the subdomain.
void LaxFriedrichsMethodMPI::setCommunicationBuffersWithNoOverlap(OrthoMesh &mesh)
{
    // Number of cells in one YZ cross-section (one layer of thickness dx).
    unsigned elemsInXY = mesh.numElemY()*mesh.numElemZ();
    double dx = mesh.get_dx();
    // Geometric tolerance for slab-membership tests: a quarter of a cell.
    double tol = dx/4.0;

    if (NetMPI::rank() != 0)
    {
        // Left edge of this process' subdomain.
        double X0 = mesh.getP()[0];

        // Send the first full layer of cells to the left neighbour.
        LGhostCellSndMap.reserve(elemsInXY);
        FXYSlabId fSnd(X0,X0+dx,tol);
        mesh.getCellsIndicesInDomain(fSnd,LGhostCellSndMap);

        // Receive into the faces sitting exactly on the X0 plane.
        LGhostCellRcvMap.reserve(elemsInXY);
        FXYSlabId fRcv(X0,X0,tol);
        mesh.getFacesIndicesInDomain(fRcv,LGhostCellRcvMap);

        // Size the flat exchange buffers to match the index maps.
        LGhostCellRcv.reinit(LGhostCellRcvMap.size());
        LGhostCellSnd.reinit(LGhostCellSndMap.size());
    }
    if (!NetMPI::isLastProcess())
    {
        // Right edge of this process' subdomain.
        double X0 = mesh.getQ()[0];

        // Send the last full layer of cells to the right neighbour.
        RGhostCellSndMap.reserve(elemsInXY);
        FXYSlabId fSnd(X0,X0-dx,tol);
        mesh.getCellsIndicesInDomain(fSnd,RGhostCellSndMap);

        // Receive into the faces sitting exactly on the right-edge plane.
        RGhostCellRcvMap.reserve(elemsInXY);
        FXYSlabId fRcv(X0,X0,tol);
        mesh.getFacesIndicesInDomain(fRcv,RGhostCellRcvMap);

        // Size the flat exchange buffers to match the index maps.
        RGhostCellRcv.reinit(RGhostCellRcvMap.size());
        RGhostCellSnd.reinit(RGhostCellSndMap.size());
    }
}
00058
00059
00060
// Builds the MPI communication maps for a decomposition WITH overlap.
//
// mesh_overlap is the number of cell layers this subdomain shares with
// each neighbour. Near each inter-process boundary three regions are
// identified (left side shown; the right side mirrors it):
//   [X0,X1]  dead zone: owned by the neighbour, received wholesale at the
//            end of iterate() (cLDeadZoneRcvMap),
//   [X1,X2]  the slab sent into the neighbour's dead zone
//            (cLDeadZoneSndMap),
//   one cell layer on each side of X1: the ghost layer exchanged every
//   time step ([X1-dx,X1] received, [X1,X1+dx] sent).
// Faces strictly inside the dead zone ([X0,X1-dx]) are tagged in
// fDeadZone so iterate() skips them; the layer next to X1 stays active.
//
// Falls back to setCommunicationBuffersWithNoOverlap() when
// mesh_overlap == 0.
void LaxFriedrichsMethodMPI::setCommunicationBuffers(OrthoMesh &mesh,int mesh_overlap)
{
    if (mesh_overlap == 0)
        return setCommunicationBuffersWithNoOverlap(mesh);

    // Cells in one YZ cross-section (one layer of thickness dx).
    unsigned elemsInXY = mesh.numElemY()*mesh.numElemZ();
    double dx = mesh.get_dx();
    // Geometric tolerance for slab-membership tests: a quarter of a cell.
    double tol = dx/4.0;

    // Every face starts active; dead-zone faces are tagged below.
    fDeadZone.resize(mesh.numFaces(),ACTIVE_ZONE);

    if (NetMPI::rank() != 0)
    {
        // Left edge of the subdomain and the two overlap-slab boundaries.
        double X0 = mesh.getP()[0];
        double X1 = X0 + mesh_overlap*dx;
        double X2 = X0 + 2*mesh_overlap*dx;

        // Dead-zone bulk exchange: receive [X0,X1], send [X1,X2].
        cLDeadZoneRcvMap.reserve(mesh_overlap*elemsInXY);
        cLDeadZoneSndMap.reserve(mesh_overlap*elemsInXY);
        FXYSlabId fRcv(X0,X1,tol);
        FXYSlabId fSnd(X1,X2,tol);
        mesh.getCellsIndicesInDomain(fRcv,cLDeadZoneRcvMap);
        mesh.getCellsIndicesInDomain(fSnd,cLDeadZoneSndMap);
        cLDeadZoneRcv.reinit(cLDeadZoneRcvMap.size());
        cLDeadZoneSnd.reinit(cLDeadZoneSndMap.size());

        // Per-step ghost layer straddling X1: receive the last dead-zone
        // layer [X1-dx,X1], send the first active layer [X1,X1+dx].
        LGhostCellRcvMap.reserve(elemsInXY);
        LGhostCellSndMap.reserve(elemsInXY);
        FXYSlabId f2Rcv(X1-dx,X1,tol);
        FXYSlabId f2Snd(X1,X1+dx,tol);
        mesh.getCellsIndicesInDomain(f2Rcv,LGhostCellRcvMap);
        mesh.getCellsIndicesInDomain(f2Snd,LGhostCellSndMap);
        LGhostCellRcv.reinit(LGhostCellRcvMap.size());
        LGhostCellSnd.reinit(LGhostCellSndMap.size());
        // Each ghost layer must be exactly one YZ cross-section of cells.
        assert(LGhostCellRcvMap.size() == elemsInXY);
        assert(LGhostCellSndMap.size() == elemsInXY);

        // Tag the dead-zone faces (excluding the active ghost layer).
        FXYSlabId fLFaces(X0,X1-dx,tol);
        mesh.tagFacesInDomain(fLFaces,fDeadZone, LEFT_DEAD_ZONE);
    }
    if (!NetMPI::isLastProcess())
    {
        // Right edge of the subdomain and the two overlap-slab boundaries.
        double X2 = mesh.getQ()[0];
        double X1 = X2 - mesh_overlap*dx;
        double X0 = X2 - 2*mesh_overlap*dx;

        // Dead-zone bulk exchange: receive [X1,X2], send [X0,X1].
        cRDeadZoneRcvMap.reserve(mesh_overlap*elemsInXY);
        cRDeadZoneSndMap.reserve(mesh_overlap*elemsInXY);
        FXYSlabId fRcv(X1,X2,tol);
        FXYSlabId fSnd(X0,X1,tol);
        mesh.getCellsIndicesInDomain(fRcv,cRDeadZoneRcvMap);
        mesh.getCellsIndicesInDomain(fSnd,cRDeadZoneSndMap);
        cRDeadZoneRcv.reinit(cRDeadZoneRcvMap.size());
        cRDeadZoneSnd.reinit(cRDeadZoneSndMap.size());

        // Per-step ghost layer straddling X1: receive the first dead-zone
        // layer [X1,X1+dx], send the last active layer [X1-dx,X1].
        RGhostCellRcvMap.reserve(elemsInXY);
        RGhostCellSndMap.reserve(elemsInXY);
        FXYSlabId f2Rcv(X1,X1+dx,tol);
        FXYSlabId f2Snd(X1-dx,X1,tol);
        mesh.getCellsIndicesInDomain(f2Rcv,RGhostCellRcvMap);
        mesh.getCellsIndicesInDomain(f2Snd,RGhostCellSndMap);
        RGhostCellRcv.reinit(RGhostCellRcvMap.size());
        RGhostCellSnd.reinit(RGhostCellSndMap.size());
        // Each ghost layer must be exactly one YZ cross-section of cells.
        assert(RGhostCellRcvMap.size() == elemsInXY);
        assert(RGhostCellSndMap.size() == elemsInXY);

        // Tag the dead-zone faces (excluding the active ghost layer).
        FXYSlabId fRFaces(X1+dx,X2,tol);
        mesh.tagFacesInDomain(fRFaces,fDeadZone,RIGHT_DEAD_ZONE);
    }
}
00140
00141
00142
00143
00144
00145
00146
00147
00148
00149
00150
00151
00152
// Nothing to release explicitly: the communication maps and buffers are
// value members destroyed by their own destructors.
LaxFriedrichsMethodMPI::~LaxFriedrichsMethodMPI()
{
}
00157
// Constructs the MPI-parallel Lax-Friedrichs method: the serial base
// class is initialized first, then the inter-process communication maps
// and buffers are built for the requested overlap width (in cell layers).
LaxFriedrichsMethodMPI::LaxFriedrichsMethodMPI(OrthoMesh &mesh,Function3D &fInitU,const VecDouble &cPor,Function3D &fPrescribedU,FaceFluxFunction &flux, double CFL,int mesh_overlap)
    :LaxFriedrichsMethod(mesh,fInitU,cPor,fPrescribedU,flux,CFL),mesh_overlap(mesh_overlap)
{
    NetMPI::trace("Entering Communication Buffers ");
    setCommunicationBuffers(mesh,mesh_overlap);
    NetMPI::trace("End");
}
00166
00167
00168 void LaxFriedrichsMethodMPI::iterate(double t, double tEnd)
00169 {
00170 unsigned posCell,negCell;
00171 VecReq _req(4);
00172 double timeInterval = tEnd-t;
00173 double localDt = this->calculateTimeStep(m_mesh,timeInterval,m_CFL,getPorosity(),m_flux);
00174 double dt=negotiateTimeStep(localDt);
00175 NetMPI::trace("Lax Negotiating dt: Sent %g, got %g",localDt,dt);
00176
00177
00178 int nIteraction = (int) round(timeInterval/dt);
00179
00180
00181 for (int count = 0; count < nIteraction; count++)
00182 {
00183
00184 OrthoMesh::Face_It face = m_mesh.begin_face();
00185 unsigned nFaces = m_mesh.numFaces();
00186
00187 m_cValuesPrev=m_cValues;
00188
00189
00190 VecTag::iterator itDZ = fDeadZone.begin();
00191 for (unsigned faceIndex=0;faceIndex < nFaces; faceIndex++,face++,itDZ++)
00192 {
00193
00194 if (*itDZ != ACTIVE_ZONE)
00195 continue;
00196
00197
00198 double SwPos,SwNeg;
00199 this->getValuesOfTheFaceCells(m_mesh,m_fValues,m_cValuesPrev,face,SwNeg,SwPos);
00200
00201
00202
00203
00204 face->getValidCellIndices(posCell,negCell);
00205
00206
00207 double posPor = getPorosity()(posCell);
00208 double negPor = getPorosity()(negCell);
00209
00210
00211 double mPor = (posPor + negPor)/2.0;
00212
00213
00214 double flux;
00215 double bc = (getBC(faceIndex));
00216 if (bc == INFINITY)
00217 {
00218
00219
00220
00221
00222
00223
00224 flux = dt*face->areaPerCellVol() * m_flux.fluxAtFace(face,faceIndex,posCell,negCell,SwPos,SwNeg) - mPor*(SwNeg-SwPos)/6.0;
00225 }
00226 else
00227 {
00228 flux = dt*face->areaPerCellVol() * m_flux.fluxAtFace(face,faceIndex,posCell,negCell,bc,bc);
00229 }
00230
00231
00232
00233
00234 if (face->hasPosCell())
00235 m_cValues(face->getPosCell()) += - flux/posPor;
00236
00237 if (face->hasNegCell())
00238 m_cValues(face->getNegCell()) += + flux/negPor;
00239 }
00240 updateGhostCells();
00241
00242 }
00243 updateDeadZone();
00244 }
00245
00246
00247
00248
00249
00250
00251
00252 void LaxFriedrichsMethodMPI::updateGhostCells()
00253 {
00254 if (mesh_overlap)
00255 NetMPI::exchangeDataRedBlack(LGhostCellSndMap,LGhostCellSnd,LGhostCellRcvMap,LGhostCellRcv,
00256 RGhostCellSndMap,RGhostCellSnd,RGhostCellRcvMap,RGhostCellRcv,
00257 m_cValues,SEND_GHOSTCELL);
00258 else
00259 NetMPI::exchangeDataRedBlack(LGhostCellSndMap,LGhostCellSnd,LGhostCellRcvMap,LGhostCellRcv,
00260 RGhostCellSndMap,RGhostCellSnd,RGhostCellRcvMap,RGhostCellRcv,
00261 m_cValues,m_fValues,SEND_GHOSTCELL);
00262
00263
00264
00265 }
00266
00267 void LaxFriedrichsMethodMPI::updateDeadZone()
00268 {
00269 if (mesh_overlap)
00270 {
00271 NetMPI::exchangeDataRedBlack(cLDeadZoneSndMap,cLDeadZoneSnd,cLDeadZoneRcvMap,cLDeadZoneRcv,
00272 cRDeadZoneSndMap,cRDeadZoneSnd,cRDeadZoneRcvMap,cRDeadZoneRcv,
00273 m_cValues,DEAD_ZONE_VALUE);
00274 }
00275 }
00276
00277
00278 void LaxFriedrichsMethodMPI::unitTest()
00279 {
00280 std::ostream &out = NetMPI::getLog();
00281 out << "\n\nLeft Ghost Rcv\n";
00282 m_mesh.printCells(LGhostCellRcvMap,out);
00283 out << "\n\nLeft Ghost Snd\n";
00284 m_mesh.printCells(LGhostCellSndMap,out);
00285 out << "\n\nRight Ghost Rcv\n";
00286 m_mesh.printCells(RGhostCellRcvMap,out);
00287 out << "\n\nRight Ghost Snd\n";
00288 m_mesh.printCells(RGhostCellSndMap,out);
00289
00290 }
00291
00292
00293 double LaxFriedrichsMethodMPI::negotiateTimeStep(double dt)
00294 {
00295 double dd;
00296 MPI_Allreduce(&dt,&dd,1,MPI_DOUBLE,MPI_MIN,MPI_COMM_WORLD);
00297 return dd;
00298 }
00299
00300