Skip to content

Commit f3ce5bb

Browse files
author
Alessandro Gastaldi
committed
Replace global MPI_COMM_WORLD with SU2_MPI::GetComm() where appropriate
1 parent 3cb709c commit f3ce5bb

69 files changed

Lines changed: 881 additions & 881 deletions

File tree

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

Common/include/linear_algebra/CPastixWrapper.hpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -93,7 +93,7 @@ class CPastixWrapper
9393
* \brief Run the external solver for the task it is currently setup to execute.
9494
*/
9595
void Run() {
96-
dpastix(&state, MPI_COMM_WORLD, nCols, colptr.data(), rowidx.data(), values.data(),
96+
dpastix(&state, SU2_MPI::GetComm(), nCols, colptr.data(), rowidx.data(), values.data(),
9797
loc2glb.data(), perm.data(), NULL, workvec.data(), 1, iparm, dparm);
9898
}
9999

Common/include/linear_algebra/CSysVector.hpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -315,7 +315,7 @@ class CSysVector : public VecExpr::CVecExpr<CSysVector<ScalarType>, ScalarType>
315315
SU2_OMP_MASTER {
316316
sum = dotRes;
317317
const auto mpi_type = (sizeof(ScalarType) < sizeof(double)) ? MPI_FLOAT : MPI_DOUBLE;
318-
SelectMPIWrapper<ScalarType>::W::Allreduce(&sum, &dotRes, 1, mpi_type, MPI_SUM, MPI_COMM_WORLD);
318+
SelectMPIWrapper<ScalarType>::W::Allreduce(&sum, &dotRes, 1, mpi_type, MPI_SUM, SU2_MPI::GetComm());
319319
}
320320
}
321321
#endif

Common/include/toolboxes/CQuasiNewtonInvLeastSquares.hpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -95,11 +95,11 @@ class CQuasiNewtonInvLeastSquares {
9595

9696
su2vector<Scalar> tmp(mat.size());
9797
MPI_Wrapper::Allreduce(mat.data(), tmp.data(), iSample*(iSample+1)/2,
98-
type, MPI_SUM, MPI_COMM_WORLD);
98+
type, MPI_SUM, SU2_MPI::GetComm());
9999
mat = std::move(tmp);
100100

101101
MPI_Wrapper::Allreduce(rhs.data(), sol.data(), iSample,
102-
type, MPI_SUM, MPI_COMM_WORLD);
102+
type, MPI_SUM, SU2_MPI::GetComm());
103103
std::swap(rhs, sol);
104104
}
105105
}

Common/src/CConfig.cpp

Lines changed: 21 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -5011,7 +5011,7 @@ void CConfig::SetMarkers(unsigned short val_software) {
50115011

50125012
#ifdef HAVE_MPI
50135013
if (val_software != SU2_MSH)
5014-
SU2_MPI::Comm_size(MPI_COMM_WORLD, &size);
5014+
SU2_MPI::Comm_size(SU2_MPI::GetComm(), &size);
50155015
#endif
50165016

50175017
/*--- Compute the total number of markers in the config file ---*/
@@ -9334,8 +9334,8 @@ void CConfig::SetProfilingCSV(void) {
93349334
int rank = MASTER_NODE;
93359335
int size = SINGLE_NODE;
93369336
#ifdef HAVE_MPI
9337-
SU2_MPI::Comm_rank(MPI_COMM_WORLD, &rank);
9338-
SU2_MPI::Comm_size(MPI_COMM_WORLD, &size);
9337+
SU2_MPI::Comm_rank(SU2_MPI::GetComm(), &rank);
9338+
SU2_MPI::Comm_size(SU2_MPI::GetComm(), &size);
93399339
#endif
93409340

93419341
/*--- Each rank has the same stack trace, so they have the same
@@ -9419,11 +9419,11 @@ void CConfig::SetProfilingCSV(void) {
94199419
}
94209420

94219421
#ifdef HAVE_MPI
9422-
MPI_Reduce(n_calls, n_calls_red, map_size, MPI_INT, MPI_SUM, MASTER_NODE, MPI_COMM_WORLD);
9423-
MPI_Reduce(l_tot, l_tot_red, map_size, MPI_DOUBLE, MPI_SUM, MASTER_NODE, MPI_COMM_WORLD);
9424-
MPI_Reduce(l_avg, l_avg_red, map_size, MPI_DOUBLE, MPI_SUM, MASTER_NODE, MPI_COMM_WORLD);
9425-
MPI_Reduce(l_min, l_min_red, map_size, MPI_DOUBLE, MPI_MIN, MASTER_NODE, MPI_COMM_WORLD);
9426-
MPI_Reduce(l_max, l_max_red, map_size, MPI_DOUBLE, MPI_MAX, MASTER_NODE, MPI_COMM_WORLD);
9422+
MPI_Reduce(n_calls, n_calls_red, map_size, MPI_INT, MPI_SUM, MASTER_NODE, SU2_MPI::GetComm());
9423+
MPI_Reduce(l_tot, l_tot_red, map_size, MPI_DOUBLE, MPI_SUM, MASTER_NODE, SU2_MPI::GetComm());
9424+
MPI_Reduce(l_avg, l_avg_red, map_size, MPI_DOUBLE, MPI_SUM, MASTER_NODE, SU2_MPI::GetComm());
9425+
MPI_Reduce(l_min, l_min_red, map_size, MPI_DOUBLE, MPI_MIN, MASTER_NODE, SU2_MPI::GetComm());
9426+
MPI_Reduce(l_max, l_max_red, map_size, MPI_DOUBLE, MPI_MAX, MASTER_NODE, SU2_MPI::GetComm());
94279427
#else
94289428
memcpy(n_calls_red, n_calls, map_size*sizeof(int));
94299429
memcpy(l_tot_red, l_tot, map_size*sizeof(double));
@@ -9557,8 +9557,8 @@ void CConfig::GEMMProfilingCSV(void) {
95579557
/* Parallel executable. The profiling data must be sent to the master node.
95589558
First determine the rank and size. */
95599559
int size;
9560-
SU2_MPI::Comm_rank(MPI_COMM_WORLD, &rank);
9561-
SU2_MPI::Comm_size(MPI_COMM_WORLD, &size);
9560+
SU2_MPI::Comm_rank(SU2_MPI::GetComm(), &rank);
9561+
SU2_MPI::Comm_size(SU2_MPI::GetComm(), &size);
95629562

95639563
/* Check for the master node. */
95649564
if(rank == MASTER_NODE) {
@@ -9569,7 +9569,7 @@ void CConfig::GEMMProfilingCSV(void) {
95699569
/* Block until a message from this processor arrives. Determine
95709570
the number of entries in the receive buffers. */
95719571
SU2_MPI::Status status;
9572-
SU2_MPI::Probe(proc, 0, MPI_COMM_WORLD, &status);
9572+
SU2_MPI::Probe(proc, 0, SU2_MPI::GetComm(), &status);
95739573

95749574
int nEntries;
95759575
SU2_MPI::Get_count(&status, MPI_LONG, &nEntries);
@@ -9583,15 +9583,15 @@ void CConfig::GEMMProfilingCSV(void) {
95839583
vector<long> recvBufMNK(3*nEntries);
95849584

95859585
SU2_MPI::Recv(recvBufNCalls.data(), recvBufNCalls.size(),
9586-
MPI_LONG, proc, 0, MPI_COMM_WORLD, &status);
9586+
MPI_LONG, proc, 0, SU2_MPI::GetComm(), &status);
95879587
SU2_MPI::Recv(recvBufTotTime.data(), recvBufTotTime.size(),
9588-
MPI_DOUBLE, proc, 1, MPI_COMM_WORLD, &status);
9588+
MPI_DOUBLE, proc, 1, SU2_MPI::GetComm(), &status);
95899589
SU2_MPI::Recv(recvBufMinTime.data(), recvBufMinTime.size(),
9590-
MPI_DOUBLE, proc, 2, MPI_COMM_WORLD, &status);
9590+
MPI_DOUBLE, proc, 2, SU2_MPI::GetComm(), &status);
95919591
SU2_MPI::Recv(recvBufMaxTime.data(), recvBufMaxTime.size(),
9592-
MPI_DOUBLE, proc, 3, MPI_COMM_WORLD, &status);
9592+
MPI_DOUBLE, proc, 3, SU2_MPI::GetComm(), &status);
95939593
SU2_MPI::Recv(recvBufMNK.data(), recvBufMNK.size(),
9594-
MPI_LONG, proc, 4, MPI_COMM_WORLD, &status);
9594+
MPI_LONG, proc, 4, SU2_MPI::GetComm(), &status);
95959595

95969596
/* Loop over the number of entries. */
95979597
for(int i=0; i<nEntries; ++i) {
@@ -9640,15 +9640,15 @@ void CConfig::GEMMProfilingCSV(void) {
96409640

96419641
/* Send the data to the master node using blocking sends. */
96429642
SU2_MPI::Send(GEMM_Profile_NCalls.data(), GEMM_Profile_NCalls.size(),
9643-
MPI_LONG, MASTER_NODE, 0, MPI_COMM_WORLD);
9643+
MPI_LONG, MASTER_NODE, 0, SU2_MPI::GetComm());
96449644
SU2_MPI::Send(GEMM_Profile_TotTime.data(), GEMM_Profile_TotTime.size(),
9645-
MPI_DOUBLE, MASTER_NODE, 1, MPI_COMM_WORLD);
9645+
MPI_DOUBLE, MASTER_NODE, 1, SU2_MPI::GetComm());
96469646
SU2_MPI::Send(GEMM_Profile_MinTime.data(), GEMM_Profile_MinTime.size(),
9647-
MPI_DOUBLE, MASTER_NODE, 2, MPI_COMM_WORLD);
9647+
MPI_DOUBLE, MASTER_NODE, 2, SU2_MPI::GetComm());
96489648
SU2_MPI::Send(GEMM_Profile_MaxTime.data(), GEMM_Profile_MaxTime.size(),
9649-
MPI_DOUBLE, MASTER_NODE, 3, MPI_COMM_WORLD);
9649+
MPI_DOUBLE, MASTER_NODE, 3, SU2_MPI::GetComm());
96509650
SU2_MPI::Send(sendBufMNK.data(), sendBufMNK.size(),
9651-
MPI_LONG, MASTER_NODE, 4, MPI_COMM_WORLD);
9651+
MPI_LONG, MASTER_NODE, 4, SU2_MPI::GetComm());
96529652
}
96539653

96549654
#endif

Common/src/adt/CADTElemClass.cpp

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -75,14 +75,14 @@ CADTElemClass::CADTElemClass(unsigned short val_nDim,
7575
/*--- First determine the number of points per rank and make them
7676
available to all ranks. ---*/
7777
int rank, size;
78-
SU2_MPI::Comm_rank(MPI_COMM_WORLD, &rank);
79-
SU2_MPI::Comm_size(MPI_COMM_WORLD, &size);
78+
SU2_MPI::Comm_rank(SU2_MPI::GetComm(), &rank);
79+
SU2_MPI::Comm_size(SU2_MPI::GetComm(), &size);
8080

8181
vector<int> recvCounts(size), displs(size);
8282
int sizeLocal = (int) val_coor.size();
8383

8484
SU2_MPI::Allgather(&sizeLocal, 1, MPI_INT, recvCounts.data(), 1,
85-
MPI_INT, MPI_COMM_WORLD);
85+
MPI_INT, SU2_MPI::GetComm());
8686
displs[0] = 0;
8787
for(int i=1; i<size; ++i) displs[i] = displs[i-1] + recvCounts[i-1];
8888

@@ -98,14 +98,14 @@ CADTElemClass::CADTElemClass(unsigned short val_nDim,
9898

9999
coorPoints.resize(sizeGlobal);
100100
SU2_MPI::Allgatherv(val_coor.data(), sizeLocal, MPI_DOUBLE, coorPoints.data(),
101-
recvCounts.data(), displs.data(), MPI_DOUBLE, MPI_COMM_WORLD);
101+
recvCounts.data(), displs.data(), MPI_DOUBLE, SU2_MPI::GetComm());
102102

103103
/*--- Determine the number of elements per rank and make them
104104
available to all ranks. ---*/
105105
sizeLocal = (int) val_VTKElem.size();
106106

107107
SU2_MPI::Allgather(&sizeLocal, 1, MPI_INT, recvCounts.data(), 1,
108-
MPI_INT, MPI_COMM_WORLD);
108+
MPI_INT, SU2_MPI::GetComm());
109109
displs[0] = 0;
110110
for(int i=1; i<size; ++i) displs[i] = displs[i-1] + recvCounts[i-1];
111111

@@ -118,13 +118,13 @@ CADTElemClass::CADTElemClass(unsigned short val_nDim,
118118
localElemIDs.resize(sizeGlobal);
119119

120120
SU2_MPI::Allgatherv(val_VTKElem.data(), sizeLocal, MPI_UNSIGNED_SHORT, elemVTK_Type.data(),
121-
recvCounts.data(), displs.data(), MPI_UNSIGNED_SHORT, MPI_COMM_WORLD);
121+
recvCounts.data(), displs.data(), MPI_UNSIGNED_SHORT, SU2_MPI::GetComm());
122122

123123
SU2_MPI::Allgatherv(val_markerID.data(), sizeLocal, MPI_UNSIGNED_SHORT, localMarkers.data(),
124-
recvCounts.data(), displs.data(), MPI_UNSIGNED_SHORT, MPI_COMM_WORLD);
124+
recvCounts.data(), displs.data(), MPI_UNSIGNED_SHORT, SU2_MPI::GetComm());
125125

126126
SU2_MPI::Allgatherv(val_elemID.data(), sizeLocal, MPI_UNSIGNED_LONG, localElemIDs.data(),
127-
recvCounts.data(), displs.data(), MPI_UNSIGNED_LONG, MPI_COMM_WORLD);
127+
recvCounts.data(), displs.data(), MPI_UNSIGNED_LONG, SU2_MPI::GetComm());
128128

129129
/*--- Create the content of ranksOfElems, which stores the original ranks
130130
where the elements come from. ---*/
@@ -140,7 +140,7 @@ CADTElemClass::CADTElemClass(unsigned short val_nDim,
140140
sizeLocal = (int) val_connElem.size();
141141

142142
SU2_MPI::Allgather(&sizeLocal, 1, MPI_INT, recvCounts.data(), 1,
143-
MPI_INT, MPI_COMM_WORLD);
143+
MPI_INT, SU2_MPI::GetComm());
144144
displs[0] = 0;
145145
for(int i=1; i<size; ++i) displs[i] = displs[i-1] + recvCounts[i-1];
146146

@@ -150,14 +150,14 @@ CADTElemClass::CADTElemClass(unsigned short val_nDim,
150150
elemConns.resize(sizeGlobal);
151151

152152
SU2_MPI::Allgatherv(val_connElem.data(), sizeLocal, MPI_UNSIGNED_LONG, elemConns.data(),
153-
recvCounts.data(), displs.data(), MPI_UNSIGNED_LONG, MPI_COMM_WORLD);
153+
recvCounts.data(), displs.data(), MPI_UNSIGNED_LONG, SU2_MPI::GetComm());
154154
}
155155
else {
156156

157157
/*--- A local tree must be built. Copy the data from the arguments into the
158158
member variables and set the ranks to the rank of this processor. ---*/
159159
int rank;
160-
SU2_MPI::Comm_rank(MPI_COMM_WORLD, &rank);
160+
SU2_MPI::Comm_rank(SU2_MPI::GetComm(), &rank);
161161

162162
coorPoints = val_coor;
163163
elemConns = val_connElem;

Common/src/adt/CADTPointsOnlyClass.cpp

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -52,14 +52,14 @@ CADTPointsOnlyClass::CADTPointsOnlyClass(unsigned short nDim,
5252
First determine the number of points per rank and store them in such
5353
a way that the info can be used directly in Allgatherv. ---*/
5454
int rank, size;
55-
SU2_MPI::Comm_rank(MPI_COMM_WORLD, &rank);
56-
SU2_MPI::Comm_size(MPI_COMM_WORLD, &size);
55+
SU2_MPI::Comm_rank(SU2_MPI::GetComm(), &rank);
56+
SU2_MPI::Comm_size(SU2_MPI::GetComm(), &size);
5757

5858
vector<int> recvCounts(size), displs(size);
5959
int sizeLocal = (int) nPoints;
6060

6161
SU2_MPI::Allgather(&sizeLocal, 1, MPI_INT, recvCounts.data(), 1,
62-
MPI_INT, MPI_COMM_WORLD);
62+
MPI_INT, SU2_MPI::GetComm());
6363
displs[0] = 0;
6464
for(int i=1; i<size; ++i) displs[i] = displs[i-1] + recvCounts[i-1];
6565

@@ -69,26 +69,26 @@ CADTPointsOnlyClass::CADTPointsOnlyClass(unsigned short nDim,
6969
localPointIDs.resize(sizeGlobal);
7070
SU2_MPI::Allgatherv(pointID, sizeLocal, MPI_UNSIGNED_LONG, localPointIDs.data(),
7171
recvCounts.data(), displs.data(), MPI_UNSIGNED_LONG,
72-
MPI_COMM_WORLD);
72+
SU2_MPI::GetComm());
7373

7474
ranksOfPoints.resize(sizeGlobal);
7575
vector<int> rankLocal(sizeLocal, rank);
7676
SU2_MPI::Allgatherv(rankLocal.data(), sizeLocal, MPI_INT, ranksOfPoints.data(),
77-
recvCounts.data(), displs.data(), MPI_INT, MPI_COMM_WORLD);
77+
recvCounts.data(), displs.data(), MPI_INT, SU2_MPI::GetComm());
7878

7979
/*--- Gather the coordinates of the points on all ranks. ---*/
8080
for(int i=0; i<size; ++i) {recvCounts[i] *= nDim; displs[i] *= nDim;}
8181

8282
coorPoints.resize(nDim*sizeGlobal);
8383
SU2_MPI::Allgatherv(coor, nDim*sizeLocal, MPI_DOUBLE, coorPoints.data(),
84-
recvCounts.data(), displs.data(), MPI_DOUBLE, MPI_COMM_WORLD);
84+
recvCounts.data(), displs.data(), MPI_DOUBLE, SU2_MPI::GetComm());
8585
}
8686
else {
8787

8888
/*--- A local tree must be built. Copy the coordinates and point IDs and
8989
set the ranks to the rank of this processor. ---*/
9090
int rank;
91-
SU2_MPI::Comm_rank(MPI_COMM_WORLD, &rank);
91+
SU2_MPI::Comm_rank(SU2_MPI::GetComm(), &rank);
9292

9393
coorPoints.assign(coor, coor + nDim*nPoints);
9494
localPointIDs.assign(pointID, pointID + nPoints);

0 commit comments

Comments
 (0)