@@ -1,5 +1,6 @@
-import pytest
 import sys
+
+import pytest
 from mpi4py import MPI
 
 from .algo import partition, lower_bound
@@ -11,7 +12,7 @@
 
 def mark_skip(item):
     comm = MPI.COMM_WORLD
-    n_rank = comm.Get_size()
+    n_rank = comm.size
     n_proc_test = get_n_proc_for_test(item)
     skip_msg = f"Not enough procs to execute: {n_proc_test} required but only {n_rank} available"
     item.add_marker(pytest.mark.skip(reason=skip_msg), append=False)
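
Note: the recurring change in this commit swaps `Comm.Get_rank()`/`Comm.Get_size()` calls for the `rank`/`size` properties that mpi4py defines on communicator objects; the two forms return the same values. A minimal standalone check of the equivalence (illustrative, not part of the diff):

    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    # mpi4py exposes rank/size as properties mirroring the MPI-style getters
    assert comm.rank == comm.Get_rank()
    assert comm.size == comm.Get_size()
    print(f"rank {comm.rank} of {comm.size}")

Run under MPI, e.g. `mpirun -n 2 python check_props.py` (hypothetical filename), every rank passes both assertions.
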
@@ -38,8 +39,7 @@ def create_sub_comm_of_size(global_comm, n_proc, mpi_comm_creation_function):
         assert 0, 'Unknown MPI communicator creation function. Available: `MPI_Comm_create`, `MPI_Comm_split`'
 
 def create_sub_comms_for_each_size(global_comm, mpi_comm_creation_function):
-    i_rank = global_comm.Get_rank()
-    n_rank = global_comm.Get_size()
+    n_rank = global_comm.size
     sub_comms = [None] * n_rank
     for i in range(0, n_rank):
         n_proc = i + 1
@@ -48,8 +48,7 @@ def create_sub_comms_for_each_size(global_comm, mpi_comm_creation_function):
 
 
 def add_sub_comm(items, global_comm, test_comm_creation, mpi_comm_creation_function):
-    i_rank = global_comm.Get_rank()
-    n_rank = global_comm.Get_size()
+    n_rank = global_comm.size
 
     # Strategy 'by_rank': create one sub-communicator by size, from sequential (size=1) to n_rank
     if test_comm_creation == 'by_rank':
@@ -109,7 +108,7 @@ def pytest_runtestloop(self, session) -> bool:
         _ = yield
         # prevent return value being non-zero (ExitCode.NO_TESTS_COLLECTED)
         # when no test run on non-master
-        if self.global_comm.Get_rank() != 0 and session.testscollected == 0:
+        if self.global_comm.rank != 0 and session.testscollected == 0:
             session.testscollected = 1
         return True
 
@@ -132,7 +131,7 @@ def pytest_runtest_logreport(self, report):
 
 
 def prepare_items_to_run(items, comm):
-    i_rank = comm.Get_rank()
+    i_rank = comm.rank
 
     items_to_run = []
 
@@ -164,7 +163,7 @@ def prepare_items_to_run(items, comm):
 
 
 def items_to_run_on_this_proc(items_by_steps, items_to_skip, comm):
-    i_rank = comm.Get_rank()
+    i_rank = comm.rank
 
     items = []
 
@@ -207,7 +206,7 @@ def pytest_runtestloop(self, session) -> bool:
         if session.config.option.collectonly:
             return True
 
-        n_workers = self.global_comm.Get_size()
+        n_workers = self.global_comm.size
 
         add_n_procs(session.items)
 
@@ -217,12 +216,12 @@ def pytest_runtestloop(self, session) -> bool:
             items_by_steps, items_to_skip, self.global_comm
         )
 
-        for i, item in enumerate(items):
+        for item in items:
             nextitem = None
             run_item_test(item, nextitem, session)
 
         # prevent return value being non-zero (ExitCode.NO_TESTS_COLLECTED) when no test run on non-master
-        if self.global_comm.Get_rank() != 0 and session.testscollected == 0:
+        if self.global_comm.rank != 0 and session.testscollected == 0:
             session.testscollected = 1
         return True
 
@@ -244,8 +243,8 @@ def pytest_runtest_logreport(self, report):
         gather_report_on_local_rank_0(report)
 
         # master ranks of each sub_comm must send their report to rank 0
-        if sub_comm.Get_rank() == 0: # only master are concerned
-            if self.global_comm.Get_rank() != 0: # if master is not global master, send
+        if sub_comm.rank == 0: # only master are concerned
+            if self.global_comm.rank != 0: # if master is not global master, send
                 self.global_comm.send(report, dest=0)
             elif report.master_running_proc != 0: # else, recv if test run remotely
                 # In the line below, MPI.ANY_TAG will NOT clash with communications outside the framework because self.global_comm is private
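
The `MPI.ANY_TAG` comment in the context above rests on a general MPI guarantee: point-to-point messages only match within the communicator they were sent on, so a wildcard receive on a private communicator cannot capture messages from user code. A small sketch of that isolation, assuming the private communicator is obtained with `Dup()` (an assumption for illustration; the diff only states that `self.global_comm` is private):

    from mpi4py import MPI

    world = MPI.COMM_WORLD
    private = world.Dup()  # separate communication context (assumed setup)

    if world.rank == 1:
        private.send("report", dest=0, tag=7)
    elif world.rank == 0:
        # Wildcards match any source/tag, but only messages sent on `private`;
        # traffic on `world` (or any other communicator) can never match here.
        msg = private.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG)
        print(msg)
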
@@ -342,7 +341,7 @@ def wait_test_to_complete(items_to_run, session, available_procs, inter_comm):
     for sub_rank in sub_ranks:
         if sub_rank != first_rank_done:
             rank_original_idx = inter_comm.recv(source=sub_rank, tag=WORK_DONE_TAG)
-            assert (rank_original_idx == original_idx) # sub_rank is supposed to have worked on the same test
+            assert rank_original_idx == original_idx # sub_rank is supposed to have worked on the same test
 
     # the procs are now available
     for sub_rank in sub_ranks:
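
Note on the `assert` cleanup above: for a single comparison the parentheses are redundant, and dropping them keeps the code away from the classic multi-element variant `assert (cond, msg)`, which asserts a non-empty tuple and therefore always passes. A tiny illustration:

    assert (1 + 1 == 2)  # parenthesized: same as the bare form
    assert 1 + 1 == 2    # preferred spelling

    # The pitfall: a 2-tuple is truthy, so this "assertion" can never fail
    # (CPython warns: "assertion is always true, perhaps remove parentheses?")
    assert (1 + 1 == 3, "oops")
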
@@ -499,7 +498,7 @@ def pytest_runtest_logreport(self, report):
         sub_comm = report.sub_comm
         gather_report_on_local_rank_0(report)
 
-        if sub_comm.Get_rank() == 0: # if local master proc, send
+        if sub_comm.rank == 0: # if local master proc, send
             # The idea of the scheduler is the following:
             # The server schedules test over clients
             # A client executes the test then report to the server it is done