@@ -1,5 +1,6 @@
-import pytest
 import sys
+
+import pytest
 from mpi4py import MPI

 from .algo import partition, lower_bound
@@ -11,7 +12,7 @@

 def mark_skip(item):
     comm = MPI.COMM_WORLD
-    n_rank = comm.Get_size()
+    n_rank = comm.size
     n_proc_test = get_n_proc_for_test(item)
     skip_msg = f"Not enough procs to execute: {n_proc_test} required but only {n_rank} available"
     item.add_marker(pytest.mark.skip(reason=skip_msg), append=False)
@@ -38,8 +39,7 @@ def create_sub_comm_of_size(global_comm, n_proc, mpi_comm_creation_function):
         assert 0, 'Unknown MPI communicator creation function. Available: `MPI_Comm_create`, `MPI_Comm_split`'

 def create_sub_comms_for_each_size(global_comm, mpi_comm_creation_function):
-    i_rank = global_comm.Get_rank()
-    n_rank = global_comm.Get_size()
+    n_rank = global_comm.size
     sub_comms = [None] * n_rank
     for i in range(0, n_rank):
         n_proc = i + 1
@@ -48,8 +48,7 @@ def create_sub_comms_for_each_size(global_comm, mpi_comm_creation_function):


 def add_sub_comm(items, global_comm, test_comm_creation, mpi_comm_creation_function):
-    i_rank = global_comm.Get_rank()
-    n_rank = global_comm.Get_size()
+    n_rank = global_comm.size

     # Strategy 'by_rank': create one sub-communicator by size, from sequential (size=1) to n_rank
     if test_comm_creation == 'by_rank':
@@ -109,7 +108,7 @@ def pytest_runtestloop(self, session) -> bool:
         _ = yield
         # prevent return value being non-zero (ExitCode.NO_TESTS_COLLECTED)
         # when no test run on non-master
-        if self.global_comm.Get_rank() != 0 and session.testscollected == 0:
+        if self.global_comm.rank != 0 and session.testscollected == 0:
             session.testscollected = 1
         return True

@@ -132,7 +131,7 @@ def pytest_runtest_logreport(self, report):


 def prepare_items_to_run(items, comm):
-    i_rank = comm.Get_rank()
+    i_rank = comm.rank

     items_to_run = []

@@ -164,7 +163,7 @@ def prepare_items_to_run(items, comm):


 def items_to_run_on_this_proc(items_by_steps, items_to_skip, comm):
-    i_rank = comm.Get_rank()
+    i_rank = comm.rank

     items = []

@@ -200,14 +199,13 @@ def pytest_runtestloop(self, session) -> bool:
             and not session.config.option.continue_on_collection_errors
         ):
             raise session.Interrupted(
-                "%d error%s during collection"
-                % (session.testsfailed, "s" if session.testsfailed != 1 else "")
+                f"{session.testsfailed} error{'s' if session.testsfailed != 1 else ''} during collection"
             )

         if session.config.option.collectonly:
             return True

-        n_workers = self.global_comm.Get_size()
+        n_workers = self.global_comm.size

         add_n_procs(session.items)

@@ -217,12 +215,12 @@ def pytest_runtestloop(self, session) -> bool:
             items_by_steps, items_to_skip, self.global_comm
         )

-        for i, item in enumerate(items):
+        for item in items:
             nextitem = None
             run_item_test(item, nextitem, session)

         # prevent return value being non-zero (ExitCode.NO_TESTS_COLLECTED) when no test run on non-master
-        if self.global_comm.Get_rank() != 0 and session.testscollected == 0:
+        if self.global_comm.rank != 0 and session.testscollected == 0:
             session.testscollected = 1
         return True

@@ -244,8 +242,8 @@ def pytest_runtest_logreport(self, report):
         gather_report_on_local_rank_0(report)

         # master ranks of each sub_comm must send their report to rank 0
-        if sub_comm.Get_rank() == 0: # only master are concerned
-            if self.global_comm.Get_rank() != 0: # if master is not global master, send
+        if sub_comm.rank == 0: # only master are concerned
+            if self.global_comm.rank != 0: # if master is not global master, send
                 self.global_comm.send(report, dest=0)
             elif report.master_running_proc != 0: # else, recv if test run remotely
                 # In the line below, MPI.ANY_TAG will NOT clash with communications outside the framework because self.global_comm is private
@@ -342,7 +340,7 @@ def wait_test_to_complete(items_to_run, session, available_procs, inter_comm):
     for sub_rank in sub_ranks:
         if sub_rank != first_rank_done:
             rank_original_idx = inter_comm.recv(source=sub_rank, tag=WORK_DONE_TAG)
-            assert (rank_original_idx == original_idx) # sub_rank is supposed to have worked on the same test
+            assert rank_original_idx == original_idx # sub_rank is supposed to have worked on the same test

     # the procs are now available
     for sub_rank in sub_ranks:
@@ -406,8 +404,7 @@ def pytest_runtestloop(self, session) -> bool:
             and not session.config.option.continue_on_collection_errors
         ):
             raise session.Interrupted(
-                "%d error%s during collection"
-                % (session.testsfailed, "s" if session.testsfailed != 1 else "")
+                f"{session.testsfailed} error{'s' if session.testsfailed != 1 else ''} during collection"
             )

         if session.config.option.collectonly:
@@ -499,7 +496,7 @@ def pytest_runtest_logreport(self, report):
         sub_comm = report.sub_comm
         gather_report_on_local_rank_0(report)

-        if sub_comm.Get_rank() == 0: # if local master proc, send
+        if sub_comm.rank == 0: # if local master proc, send
             # The idea of the scheduler is the following:
             # The server schedules test over clients
             # A client executes the test then report to the server it is done
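The server/client idea described in those last comments can be illustrated with a minimal, self-contained sketch. This is not the plugin's actual scheduler: the helper names serve/work and the stop-signal convention (None) are made up for illustration, and at least two MPI ranks are assumed. Rank 0 hands out one test item at a time; each worker runs the item it receives and reports back; the server sends None once the queue is empty.

from mpi4py import MPI

def serve(comm, items):
    # rank 0: give each worker one item at a time, stop it with None when the queue is empty
    status = MPI.Status()
    pending = list(items)
    active = 0
    for worker in range(1, comm.size):
        if pending:
            comm.send(pending.pop(0), dest=worker)
            active += 1
        else:
            comm.send(None, dest=worker)
    while active:
        result = comm.recv(source=MPI.ANY_SOURCE, status=status)
        worker = status.Get_source()
        if pending:
            comm.send(pending.pop(0), dest=worker)
        else:
            comm.send(None, dest=worker)
            active -= 1
        print(result)

def work(comm):
    # other ranks: run whatever the server sends until the stop signal arrives
    while True:
        item = comm.recv(source=0)
        if item is None:
            break
        comm.send(f"rank {comm.rank} ran {item}", dest=0)

if __name__ == "__main__":
    comm = MPI.COMM_WORLD
    if comm.rank == 0:
        serve(comm, [f"test_{i}" for i in range(10)])
    else:
        work(comm)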
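For context on the recurring Get_rank()/Get_size() to .rank/.size substitutions throughout this diff: mpi4py exposes a communicator's rank and size both as methods and as read-only properties, so the two spellings return the same values. A minimal check:

from mpi4py import MPI

comm = MPI.COMM_WORLD
# the properties mirror the method calls
assert comm.rank == comm.Get_rank()
assert comm.size == comm.Get_size()
print(f"rank {comm.rank} of {comm.size}")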