#include <mainappmpi.h>
Public Member Functions
    MainAppMPI ()
    ~MainAppMPI ()
    void execute (std::string fileName)

Protected Member Functions
    bool runningInParallel ()
    int getNProcess ()
    int getRank ()
    Point3D getLocalP0 ()
    Point3D getLocalP1 ()
    std::vector< unsigned > getLocalNElems ()
    void setLocalTriangulation ()
    void printLocalTriangulation ()

Protected Attributes
    std::ofstream out

Private Member Functions
    std::string appendRankInFileName (std::string fileName)
Definition at line 9 of file mainappmpi.h.
MainAppMPI::MainAppMPI ( )
Definition at line 24 of file mainappmpi.cpp.
    : MainApp()
{
    if (NetMPI::nProcess() == 1)
        setOutput(std::cout);
    else
    {
        char strOut[200];
        if (getRank() == 0)
            system("rm out-*");
        NetMPI::Barrier();
        sprintf(strOut, "out-%d", getRank());
        out.open(strOut);
        setOutput(out);
        NetMPI::setLog(out);
    }
}
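A minimal, hypothetical driver showing how the class is typically used. The main() below is not part of the documented sources; it assumes that MPI startup and shutdown are handled by the NetMPI layer or by the launch environment.

// Hypothetical usage sketch; not from the documented sources.
// Assumes MPI initialization/finalization is handled by NetMPI or the runtime.
#include <mainappmpi.h>
#include <cstdio>

int main(int argc, char *argv[])
{
    if (argc < 2)
    {
        std::printf("usage: %s <config file>\n", argv[0]);
        return 1;
    }
    MainAppMPI app;          // redirects output to out-<rank> when more than one process runs
    app.execute(argv[1]);    // run the simulation described by the configuration file
    return 0;
}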
MainAppMPI::~MainAppMPI ( )
Definition at line 46 of file mainappmpi.cpp.
std::string MainAppMPI::appendRankInFileName ( std::string fileName ) [private]
Definition at line 364 of file mainappmpi.cpp.
{
    char strHDF5[500];
    sprintf(strHDF5, "%s_%d.hdf",
            strtok(basename((char*) fileName.c_str()), "."), getRank());

    string result = dirname((char*) fileName.c_str());
    result += "/";
    result += strHDF5;
    return result;
}
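A self-contained sketch of the same rank-suffix transformation using only std::string, for illustration. The helper name and example path below are hypothetical; only the behavior (keep the directory, drop the extension, append the rank, add ".hdf") is taken from the code above.

#include <string>

// Hypothetical re-implementation for illustration:
// "sim/run.cfg" with rank 2 yields "sim/run_2.hdf".
std::string rankSuffixedHDF5(const std::string &fileName, int rank)
{
    std::string::size_type slash = fileName.find_last_of('/');
    std::string dir  = (slash == std::string::npos) ? "." : fileName.substr(0, slash);
    std::string base = (slash == std::string::npos) ? fileName : fileName.substr(slash + 1);
    std::string stem = base.substr(0, base.find('.'));   // like strtok(...,"."), keep the part before the first '.'
    return dir + "/" + stem + "_" + std::to_string(rank) + ".hdf";
}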
void MainAppMPI::execute ( std::string fileName )
Execute the program
Definition at line 153 of file mainappmpi.cpp.
{
    try {
        //Read the config file.
        configFile.readFile(fileName);

        if (configFile.getInt("DEBUG_PRINT_TRIANGULATION",0) == 1)
        {
            getMesh().print();
        }

        if (configFile.getInt("UNIT_TESTS",0))
        {
            UnitTests tests(*this);
            tests.execute();
            printBeginSection("END UNITS TESTS");
            return;
        }

        //Initialize HDF5 writer with the triangulation.
        HDF5OrthoWriter &hdf5 = HDF5OrthoWriter::getHDF5OrthoWriter();
        hdf5.setOutputFile(getHDF5FileName());
        print("HDF5","HDF5 Output: %s",getHDF5FileName().c_str());
        hdf5.writeMesh("tria0",getMesh());
        hdf5.setVariable("Time",0);

        m_pSequencer = getSequencer();

        switch (configFile.getInt("SEQUENCE_ALGORITHM"))
        {
        case 0:
            print("Sequencer","Dynamic Module Test\n");
            printLog(std::cout);
            m_pTransportModule = getTransportModule();
            m_pDynamicModule = getDynamicModule();
            m_pSequencer->testDynamicModule(*m_pDynamicModule,*m_pTransportModule);
            break;
        case 1:
        {
            m_pTransportModule = getTransportModule();
            m_pDynamicModule = getDynamicModule();

            print("Sequencer","Method: Transport Test\nEnd Time: %g\nOutputs: %g\n\n\n",
                  configFile.getDouble("END_TIME"),
                  configFile.getDouble("N_OUTPUTS"));
            printLog(std::cout);

            m_pSequencer->testTransport(*m_pDynamicModule,*m_pTransportModule,
                                        configFile.getDouble("END_TIME"),
                                        configFile.getDouble("N_OUTPUTS"));
            break;
        }
        case 2: //Case to solve the biphasic incompressible flow
        {
            m_pTransportModule = getTransportModule();
            m_pDynamicModule = getDynamicModule();
            m_pDiff = getDiffusiveStep();
            m_pDiff->setTransport(*m_pTransportModule);

            int nDyn = configFile.getInt("N_DYN_ITERATIONS");
            int nOuts = configFile.getInt("N_OUTPUTS");
            int nOutsPerDyn = nOuts/nDyn;
            if (nOutsPerDyn == 0)
                nOutsPerDyn = 1;
            print("Sequencer","Method: Staggered Iteration\nEnd Time: %g\nDynamic Modules Iterations: %d\nOutputs Per Dyn Iteration: %d\n",
                  configFile.getDouble("END_TIME"),
                  configFile.getInt("N_DYN_ITERATIONS"),
                  nOutsPerDyn);
            printLog(std::cout);
            m_pSequencer->alternateIteration(*m_pDynamicModule,*m_pTransportModule,m_pDiff,
                                             configFile.getDouble("END_TIME"),
                                             configFile.getInt("N_DYN_ITERATIONS"),
                                             nOuts);
            break;
        }
        case 3:
        {
            m_pFlash = getFlash();
            m_pTransportModule = getTransportModule();
            m_pDynamicModule = getDynamicModule();
            m_pFlash->setTransport(*m_pTransportModule);
            m_pFlash->setDynamic(*m_pDynamicModule);

            m_pDiff = getDiffusiveStep();
            m_pDiff->setTransport(*m_pTransportModule);

            /* If this is a compressible model, the initial condition of the transport
               must be updated so that the mixture fills the whole pore volume.
               We do that by changing the total number of moles of the mixture while
               preserving the proportion of moles of each component previously
               defined by the initial conditions of the transport equations. */
            if (typeid(*m_pFlash) != typeid(DummyFlash))
            {
                print("Sequencer",
                      "Adjusting Initial Conditions for Compressible Model\n");
                adjustInitialConditionForCompressibleModel(*m_pDynamicModule,
                    *dynamic_cast<ConservativeMethodForSystem*>(m_pTransportModule),*m_pFlash);
            }

            print("Sequencer","Method: Staggered Iteration with Flash calculation\n");
            print("Sequencer","End Time: %g\n",configFile.getDouble("END_TIME"));
            print("Sequencer","Dynamic Iterations: %g\n",configFile.getDouble("N_DYN_ITERATIONS"));
            ConservativeMethodForSystem* pTransportSystem = dynamic_cast<ConservativeMethodForSystem*>(m_pTransportModule);

            if (configFile.isDefined("TRANSPORT_PER_DYNAMIC_STEPS"))
            {
                CompressibleDynamic *pCompDynamic;
                pCompDynamic = dynamic_cast<CompressibleDynamic*>(m_pDynamicModule);
                if (!pCompDynamic)
                {
                    throw new Exception("The proportion control sequence algorithm demands a module that implements CompressibleDynamicBase\n");
                }
                print("Sequencer","Max Transport Steps per Cycle: %g\n",configFile.getDouble("TRANSPORT_PER_DYNAMIC_STEPS"));
                printLog(std::cout);
                m_pSequencer->alternateIterationProportionControl(*pCompDynamic,
                                                                  *pTransportSystem,
                                                                  *m_pFlash,m_pDiff,
                                                                  configFile.getDouble("END_TIME"),
                                                                  configFile.getDouble("TRANSPORT_PER_DYNAMIC_STEPS"),
                                                                  configFile.getDouble("TRANSPORT_PER_DYNAMIC_STEPS_TOLERANCE"),
                                                                  getDynamicTimeStep(),
                                                                  configFile.getDouble("N_OUTPUTS"));
            }
            else
            {
                printLog(std::cout);
                m_pSequencer->alternateIteration(*m_pDynamicModule,
                                                 *pTransportSystem,
                                                 *m_pFlash,
                                                 m_pDiff,
                                                 configFile.getDouble("END_TIME"),
                                                 configFile.getDouble("N_DYN_ITERATIONS"),
                                                 configFile.getDouble("N_OUTPUTS"));
            }
        }
        break;
        case 4:
        {
            m_pTransportModule = getTransportModule();
            m_pDiff = getDiffusiveStep();
            m_pDiff->setTransport(*m_pTransportModule);

            int nDyn = configFile.getInt("N_DYN_ITERATIONS");
            unsigned nOuts = configFile.getInt("N_OUTPUTS");
            int nOutsPerDyn = nOuts/nDyn;
            if (nOutsPerDyn == 0)
                nOutsPerDyn = 1;
            printLog(std::cout);
            m_pSequencer->diffusiveStepTest(*m_pDiff, configFile.getInt("N_DYN_ITERATIONS"),
                                            configFile.getDouble("END_TIME"),
                                            nOuts);
            break;
        }
        default:
            throw new Exception("Wrong option for the SEQUENCE_ALGORITHM key\n");
            return; //Just to avoid compiler warnings
        }
        printf("\n\nSimulation Done\n");
        hdf5.close();
        return;
    }
    catch(Exception *e)
    {
        printf("Error");
        printBeginSection("ERROR MESSAGE");
        printf("%s\n\nQuitting.....\n",e->getError().c_str());
        abort();
    }
}
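The key names read above suggest a configuration file along the following lines. The exact syntax accepted by configFile.readFile() is not shown in this documentation, so the key/value layout and the sample values are assumptions; only the key names are taken from the code.

# Hypothetical configuration sketch (syntax and values assumed, keys from execute())
SEQUENCE_ALGORITHM = 2          # 0-4 selects the sequencing method in the switch above
END_TIME           = 1.0
N_DYN_ITERATIONS   = 10
N_OUTPUTS          = 10
MESH_OVERLAP_SIZE  = 2          # used by the local triangulation methods below
DEBUG_PRINT_TRIANGULATION = 0
UNIT_TESTS         = 0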
std::vector< unsigned > MainAppMPI::getLocalNElems ( ) [protected]
Definition at line 125 of file mainappmpi.cpp.
{
    std::vector<unsigned> v(3);

    int rank = getRank();
    int nProcess = getNProcess();
    std::vector<unsigned> nElems = MainApp::getNElements();

    int rest = nElems[0]%nProcess;
    int nElements = nElems[0]/nProcess + (rank < rest ? 1 : 0);

    if (nProcess == 1)
        nElements += 0;
    else if (rank == 0 || rank == (nProcess - 1))
        nElements += configFile.getInt("MESH_OVERLAP_SIZE");
    else
        nElements += 2*configFile.getInt("MESH_OVERLAP_SIZE");

    v[0] = nElements;
    v[1] = nElems[1];
    v[2] = nElems[2];
    return v;
}
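A small self-contained sketch of the partitioning arithmetic above. The global element count, process count, and overlap value are hypothetical, chosen only to make the split visible.

// Standalone illustration of the x-direction split (values hypothetical):
// 10 global elements over 3 ranks with MESH_OVERLAP_SIZE = 1.
#include <cstdio>

int main()
{
    const unsigned nGlobal = 10, overlap = 1;
    const int nProcess = 3;
    for (int rank = 0; rank < nProcess; ++rank)
    {
        int rest  = nGlobal % nProcess;
        int local = nGlobal / nProcess + (rank < rest ? 1 : 0);             // base share + remainder
        local += (rank == 0 || rank == nProcess - 1) ? overlap : 2*overlap; // one or two overlap layers
        std::printf("rank %d: %d local elements\n", rank, local);           // prints 5, 5, 4
    }
}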
Point3D MainAppMPI::getLocalP0 ( ) [protected]
Definition at line 93 of file mainappmpi.cpp.
{
    std::vector<unsigned> nElems = getNElements();
    Point3D DX = getDX();

    int rank = getRank();
    int nProcess = getNProcess();

    //Get the first startElement of the non overlapping region
    unsigned startElement = nElems[0]/nProcess*rank;
    int rest = nElems[0]%nProcess;
    startElement += ((rank < rest) ? rank : rest);

    //Now add the overlap region
    if (rank != 0)
    {
        startElement -= configFile.getInt("MESH_OVERLAP_SIZE");
    }
    return Point3D(startElement*DX(0),0,0);
}
Point3D MainAppMPI::getLocalP1 ( ) [protected]
Definition at line 115 of file mainappmpi.cpp.
{
    Point3D LP1 = MainApp::getP1();
    Point3D DX = getDX();
    LP1(0) = getLocalP0()[0] + getLocalNElems()[0]*DX(0);
    return LP1;
}
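Continuing the hypothetical example used for getLocalNElems() (10 global elements, 3 processes, MESH_OVERLAP_SIZE = 1) and additionally assuming DX(0) = 1: on rank 1 the non-overlapping region starts at element 10/3*1 = 3 plus a remainder correction of 1, giving 4; subtracting the overlap shifts it back to 3, so getLocalP0() returns (3, 0, 0). With 5 local elements, getLocalP1() places the right boundary at x = 3 + 5*1 = 8, so the rank-1 subdomain spans elements 3 through 7.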
int MainAppMPI::getNProcess ( ) [protected]
Definition at line 78 of file mainappmpi.cpp.
int MainAppMPI::getRank ( ) [protected]
Definition at line 85 of file mainappmpi.cpp.
void MainAppMPI::printLocalTriangulation ( ) [protected]
Definition at line 338 of file mainappmpi.cpp.
{
    Point3D P0 = getLocalP0();
    Point3D P1 = getLocalP1();
    Point3D DX = getDX();
    std::vector<unsigned> el = getLocalNElems();
    print("Mesh",
          "Local Triangulation:\n"
          "Elements: %d x %d x %d\n"
          "Dimensions: %g x %g x %g\n"
          "Steps: %g,%g,%g\n"
          "Domain <%g, %g, %g> - <%g,%g,%g>\n",
          el[0],el[1],el[2],
          P1[0]-P0[0],P1[1]-P0[1],P1[2]-P0[2],
          DX[0],DX[1],DX[2],
          P0[0],P0[1],P0[2],
          P1[0],P1[1],P1[2]);
    print("Mesh","Total: %u cells, %u faces\n",getMesh().numCells(),getMesh().numFaces());
}
bool MainAppMPI::runningInParallel ( ) [protected]
Return whether the options given by the user can run with the parallel code.
Definition at line 382 of file mainappmpi.cpp.
{
    if (NetMPI::nProcess() == 1)
        return false;
    else
        return true;

    // int option = configFile.getInt("DYNAMIC_MODULE");
    // int option2 = configFile.getInt("TRANSPORT_MODULE");
    // if ((option > 1 && option <= 8))
    //     return false;
    // else
    //     return true;
}
void MainAppMPI::setLocalTriangulation ( ) [protected]
Definition at line 53 of file mainappmpi.cpp.
{
    if (this->runningInParallel())
    {
        if (getLocalNElems()[0] < 2*configFile.getUnsigned("MESH_OVERLAP_SIZE"))
        {
            throw new Exception("Mesh Overlap gets the entire domain");
        }
    }
    Point<3> P0 = getLocalP0();
    Point<3> P1 = getLocalP1();

    std::vector<unsigned> v = getLocalNElems();
    VecWellInfo wells = getWells();

    //OK generate the grid
    m_pMesh = new OrthoMesh(P0,P1,v[0],v[1],v[2]);
    m_pMesh->putWells(wells);
}
std::ofstream MainAppMPI::out [protected]
Definition at line 15 of file mainappmpi.h.