
Commit 4508055

cleaning with pylint
Parent: 8570a58

21 files changed: +190 −189 lines

.github/workflows/test.yml

+1 −1

@@ -30,7 +30,7 @@ jobs:
         pip install -r *.egg-info/requires.txt
     - name: Analysing the code with pylint
       run: |
-        pylint --unsafe-load-any-extension=y --disable=fixme $(git ls-files '*.py') || true
+        pylint --unsafe-load-any-extension=y --disable=fixme $(git ls-files "pytest_parallel/*.py" "test/*.py") || true

   build:
     needs: [pylint]
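
Note on the hunk above: linting now targets only the tracked package and test sources instead of every `*.py` in the repository, so drafts such as `.slurm_draft/` stay out of the report. A minimal sketch of the same selection driven from Python (illustrative only, not part of the repo; `check=False` plays the role of `|| true`):

    # Lint only the tracked package and test sources, mirroring the
    # `git ls-files` selection in the workflow step above.
    import subprocess

    files = subprocess.check_output(
        ['git', 'ls-files', 'pytest_parallel/*.py', 'test/*.py'],
        text=True,
    ).split()
    subprocess.run(
        ['pylint', '--unsafe-load-any-extension=y', '--disable=fixme', *files],
        check=False,   # like `|| true`: do not fail the step on lint errors
    )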

.slurm_draft/worker.py

+3 −3

@@ -11,13 +11,13 @@
 test_idx = int(sys.argv[3])

 comm = MPI.COMM_WORLD
-print(f'start at {scheduler_ip}@{server_port} test {test_idx} at rank {comm.Get_rank()}/{comm.Get_size()} exec on {socket.gethostname()} - ',datetime.datetime.now())
+print(f'start at {scheduler_ip}@{server_port} test {test_idx} at rank {comm.rank}/{comm.size} exec on {socket.gethostname()} - ',datetime.datetime.now())

-if comm.Get_rank() == 0:
+if comm.rank == 0:
     with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
         s.connect((scheduler_ip, server_port))
         #time.sleep(10+5*test_idx)
-        #msg = f'Hello from test {test_idx} at rank {comm.Get_rank()}/{comm.Get_size()} exec on {socket.gethostname()}'
+        #msg = f'Hello from test {test_idx} at rank {comm.rank}/{comm.size} exec on {socket.gethostname()}'
         #socket_utils.send(s, msg)
         info = {
             'test_idx': test_idx,
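
Note: the bulk of this commit replaces mpi4py's `Get_rank()`/`Get_size()` calls with the equivalent `rank`/`size` communicator properties. A minimal sketch showing the two spellings agree:

    # mpi4py exposes `rank` and `size` as properties on a communicator;
    # they return the same values as Get_rank()/Get_size().
    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    assert comm.rank == comm.Get_rank()
    assert comm.size == comm.Get_size()
    print(f'rank {comm.rank} of {comm.size}')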

pytest_parallel/gather_report.py

+2 −2

@@ -45,8 +45,8 @@ def gather_report_on_local_rank_0(report):
     del report.sub_comm # No need to keep it in the report
     # Furthermore we need to serialize the report
     # and mpi4py does not know how to serialize report.sub_comm
-    i_sub_rank = sub_comm.Get_rank()
-    n_sub_rank = sub_comm.Get_size()
+    i_sub_rank = sub_comm.rank
+    n_sub_rank = sub_comm.size

     if (
         report.outcome != "skipped"
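
Note on the comment in this hunk: the communicator is deleted from the report precisely because it cannot travel between ranks. A throwaway sketch (not from the repo) that shows the failure mode:

    # Sketch: MPI communicators are process-local handles; pickling one
    # raises, which is why `del report.sub_comm` precedes serialization.
    import pickle
    from mpi4py import MPI

    try:
        pickle.dumps(MPI.COMM_WORLD)
    except (TypeError, pickle.PicklingError) as exc:
        print(f'cannot serialize a communicator: {exc}')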

pytest_parallel/mpi_reporter.py

+15 −16

@@ -1,5 +1,6 @@
-import pytest
 import sys
+
+import pytest
 from mpi4py import MPI

 from .algo import partition, lower_bound

@@ -11,7 +12,7 @@

 def mark_skip(item):
     comm = MPI.COMM_WORLD
-    n_rank = comm.Get_size()
+    n_rank = comm.size
     n_proc_test = get_n_proc_for_test(item)
     skip_msg = f"Not enough procs to execute: {n_proc_test} required but only {n_rank} available"
     item.add_marker(pytest.mark.skip(reason=skip_msg), append=False)

@@ -38,8 +39,7 @@ def create_sub_comm_of_size(global_comm, n_proc, mpi_comm_creation_function):
     assert 0, 'Unknown MPI communicator creation function. Available: `MPI_Comm_create`, `MPI_Comm_split`'

 def create_sub_comms_for_each_size(global_comm, mpi_comm_creation_function):
-    i_rank = global_comm.Get_rank()
-    n_rank = global_comm.Get_size()
+    n_rank = global_comm.size
     sub_comms = [None] * n_rank
     for i in range(0,n_rank):
         n_proc = i+1

@@ -48,8 +48,7 @@ def create_sub_comms_for_each_size(global_comm, mpi_comm_creation_function):


 def add_sub_comm(items, global_comm, test_comm_creation, mpi_comm_creation_function):
-    i_rank = global_comm.Get_rank()
-    n_rank = global_comm.Get_size()
+    n_rank = global_comm.size

     # Strategy 'by_rank': create one sub-communicator by size, from sequential (size=1) to n_rank
     if test_comm_creation == 'by_rank':

@@ -109,7 +108,7 @@ def pytest_runtestloop(self, session) -> bool:
         _ = yield
         # prevent return value being non-zero (ExitCode.NO_TESTS_COLLECTED)
         # when no test run on non-master
-        if self.global_comm.Get_rank() != 0 and session.testscollected == 0:
+        if self.global_comm.rank != 0 and session.testscollected == 0:
             session.testscollected = 1
         return True

@@ -132,7 +131,7 @@ def pytest_runtest_logreport(self, report):


 def prepare_items_to_run(items, comm):
-    i_rank = comm.Get_rank()
+    i_rank = comm.rank

     items_to_run = []

@@ -164,7 +163,7 @@ def prepare_items_to_run(items, comm):


 def items_to_run_on_this_proc(items_by_steps, items_to_skip, comm):
-    i_rank = comm.Get_rank()
+    i_rank = comm.rank

     items = []

@@ -207,7 +206,7 @@ def pytest_runtestloop(self, session) -> bool:
         if session.config.option.collectonly:
             return True

-        n_workers = self.global_comm.Get_size()
+        n_workers = self.global_comm.size

         add_n_procs(session.items)

@@ -217,12 +216,12 @@ def pytest_runtestloop(self, session) -> bool:
             items_by_steps, items_to_skip, self.global_comm
         )

-        for i, item in enumerate(items):
+        for item in items:
             nextitem = None
             run_item_test(item, nextitem, session)

         # prevent return value being non-zero (ExitCode.NO_TESTS_COLLECTED) when no test run on non-master
-        if self.global_comm.Get_rank() != 0 and session.testscollected == 0:
+        if self.global_comm.rank != 0 and session.testscollected == 0:
             session.testscollected = 1
         return True

@@ -244,8 +243,8 @@ def pytest_runtest_logreport(self, report):
         gather_report_on_local_rank_0(report)

         # master ranks of each sub_comm must send their report to rank 0
-        if sub_comm.Get_rank() == 0: # only master are concerned
-            if self.global_comm.Get_rank() != 0: # if master is not global master, send
+        if sub_comm.rank == 0: # only master are concerned
+            if self.global_comm.rank != 0: # if master is not global master, send
                 self.global_comm.send(report, dest=0)
             elif report.master_running_proc != 0: # else, recv if test run remotely
                 # In the line below, MPI.ANY_TAG will NOT clash with communications outside the framework because self.global_comm is private

@@ -342,7 +341,7 @@ def wait_test_to_complete(items_to_run, session, available_procs, inter_comm):
     for sub_rank in sub_ranks:
         if sub_rank != first_rank_done:
             rank_original_idx = inter_comm.recv(source=sub_rank, tag=WORK_DONE_TAG)
-            assert (rank_original_idx == original_idx) # sub_rank is supposed to have worked on the same test
+            assert rank_original_idx == original_idx # sub_rank is supposed to have worked on the same test

     # the procs are now available
     for sub_rank in sub_ranks:

@@ -499,7 +498,7 @@ def pytest_runtest_logreport(self, report):
         sub_comm = report.sub_comm
         gather_report_on_local_rank_0(report)

-        if sub_comm.Get_rank() == 0: # if local master proc, send
+        if sub_comm.rank == 0: # if local master proc, send
             # The idea of the scheduler is the following:
             #   The server schedules test over clients
             #   A client executes the test then report to the server it is done
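
Note: the hunks in this file are mechanical answers to stock pylint diagnostics: import ordering (standard-library `sys` before third-party `pytest`), unused locals (`i_rank`, the unused `enumerate` index), and superfluous parentheses around an `assert` expression. A minimal sketch of the unused-index cleanup, with the message ID stated as an assumption (the commit itself does not name it):

    # `i` is assigned but never used, which pylint reports as
    # W0612 (unused-variable); iterating directly is the clean form.
    items = ['test_a', 'test_b']

    for i, item in enumerate(items):   # flagged: `i` unused
        print(item)

    for item in items:                 # replacement used in the hunk above
        print(item)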

pytest_parallel/plugin.py

+11 −10

@@ -7,6 +7,7 @@
 import tempfile
 from pathlib import Path
 import argparse
+
 import pytest
 from _pytest.terminal import TerminalReporter

@@ -104,9 +105,9 @@ def pytest_configure(config):
     slurm_file = config.getoption('slurm_file')
     slurm_export_env = config.getoption('slurm_export_env')
     detach = config.getoption('detach')
-    if scheduler != 'slurm' and scheduler != 'shell':
+    if not scheduler in ['slurm', 'shell']:
         assert not is_worker, f'Internal pytest_parallel error `--_worker` not available with`--scheduler={scheduler}`'
-    if (scheduler == 'slurm' or scheduler == 'shell') and not is_worker:
+    if scheduler in ['slurm', 'shell'] and not is_worker:
         if n_workers is None:
             raise PytestParallelError(f'You need to specify `--n-workers` when `--scheduler={scheduler}`')
         if scheduler != 'slurm':

@@ -119,7 +120,7 @@ def pytest_configure(config):
         if slurm_file is not None:
             raise PytestParallelError('Option `--slurm-file` only available when `--scheduler=slurm`')

-    if (scheduler == 'shell' or scheduler == 'slurm') and not is_worker:
+    if scheduler in ['shell', 'slurm'] and not is_worker:
         from mpi4py import MPI
         if MPI.COMM_WORLD.size != 1:
             err_msg = 'Do not launch `pytest_parallel` on more that one process when `--scheduler=shell` or `--scheduler=slurm`.\n' \

@@ -142,7 +143,7 @@ def pytest_configure(config):
             raise PytestParallelError('You cannot specify `--slurm-init-cmds` together with `--slurm-file`')

         if '-n=' in slurm_options or '--ntasks=' in slurm_options:
-              raise PytestParallelError('Do not specify `-n/--ntasks` in `--slurm-options` (it is deduced from the `--n-worker` value).')
+            raise PytestParallelError('Do not specify `-n/--ntasks` in `--slurm-options` (it is deduced from the `--n-worker` value).')

         from .slurm_scheduler import SlurmScheduler

@@ -154,7 +155,7 @@ def pytest_configure(config):
         ## pull apart `--slurm-options` for special treatement
         main_invoke_params = main_invoke_params.replace(f'--slurm-options={slurm_options}', '')
         for file_or_dir in config.option.file_or_dir:
-              main_invoke_params = main_invoke_params.replace(file_or_dir, '')
+            main_invoke_params = main_invoke_params.replace(file_or_dir, '')
         slurm_option_list = slurm_options.split() if slurm_options is not None else []
         slurm_conf = {
             'options' : slurm_option_list,

@@ -172,7 +173,7 @@ def pytest_configure(config):
         # reconstruct complete invoke string
         main_invoke_params = _invoke_params(config.invocation_params.args)
         for file_or_dir in config.option.file_or_dir:
-              main_invoke_params = main_invoke_params.replace(file_or_dir, '')
+            main_invoke_params = main_invoke_params.replace(file_or_dir, '')
         plugin = ShellStaticScheduler(main_invoke_params, n_workers, detach)
     else:
         from mpi4py import MPI

@@ -190,7 +191,7 @@ def pytest_configure(config):
     elif scheduler == 'dynamic':
         inter_comm = spawn_master_process(global_comm)
         plugin = DynamicScheduler(global_comm, inter_comm)
-    elif (scheduler == 'slurm' or scheduler == 'shell') and is_worker:
+    elif scheduler in ['shell', 'slurm'] and is_worker:
         scheduler_ip_address = config.getoption('_scheduler_ip_address')
         scheduler_port = config.getoption('_scheduler_port')
         session_folder = config.getoption('_session_folder')

@@ -209,7 +210,7 @@ def pytest_configure(config):

     # Pytest relies on having a terminal reporter to decide on how to create error messages, see #12
     # Hence, register a terminal reporter that outputs to /dev/null
-    null_file = open(os.devnull,'w')
+    null_file = open(os.devnull,'w', encoding="utf-8")
     terminal_reporter = TerminalReporter(config, null_file)
     config.pluginmanager.register(terminal_reporter, "terminalreporter")

@@ -238,7 +239,7 @@ def __init__(self, comm):
     def __enter__(self):
         from mpi4py import MPI
         if self.comm != MPI.COMM_NULL: # TODO DEL once non-participating rank do not participate in fixtures either
-            rank = self.comm.Get_rank()
+            rank = self.comm.rank
             self.tmp_dir = tempfile.TemporaryDirectory() if rank == 0 else None
             self.tmp_path = Path(self.tmp_dir.name) if rank == 0 else None
         return self.comm.bcast(self.tmp_path, root=0)

@@ -247,7 +248,7 @@ def __exit__(self, type, value, traceback):
         from mpi4py import MPI
         if self.comm != MPI.COMM_NULL: # TODO DEL once non-participating rank do not participate in fixtures either
             self.comm.barrier()
-            if self.comm.Get_rank() == 0:
+            if self.comm.rank == 0:
                 self.tmp_dir.cleanup()
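Note: besides the `scheduler in [...]` membership rewrites and more `rank`/`size` properties, two hunks here are indentation-only fixes (the over-indented `raise` and `main_invoke_params` lines), and the /dev/null reporter now opens its file with an explicit encoding. The latter addresses pylint's unspecified-encoding warning (W1514 in recent releases, stated here as an assumption); a minimal sketch:

    # Sketch: an explicit encoding makes the write independent of the
    # platform locale and silences pylint's unspecified-encoding warning.
    import os

    with open(os.devnull, 'w', encoding='utf-8') as null_file:
        print('discarded output', file=null_file)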

pytest_parallel/process_worker.py

+6 −6

@@ -1,9 +1,9 @@
-import pytest
+from pathlib import Path
+import pickle

+import pytest
 from mpi4py import MPI

-from pathlib import Path
-import pickle
 from .utils.items import get_n_proc_for_test, run_item_test
 from .gather_report import gather_report_on_local_rank_0

@@ -32,15 +32,15 @@ def pytest_runtestloop(self, session) -> bool:

         # check there is no file from a previous run
         if comm.rank == 0:
-            for when in {'fatal_error', 'setup', 'call', 'teardown'}:
+            for when in ['fatal_error', 'setup', 'call', 'teardown']:
                 path = self._file_path(when)
                 assert not path.exists(), f'INTERNAL FATAL ERROR in pytest_parallel: file "{path}" should not exist at this point'

         # check the number of procs matches the one specified by the test
         if comm.size != test_comm_size: # fatal error, SLURM and MPI do not interoperate correctly
             if comm.rank == 0:
                 error_info = f'FATAL ERROR in pytest_parallel with slurm scheduling: test `{item.nodeid}`' \
-                             f' uses a `comm` of size {test_comm_size} but was launched with size {comm.Get_size()}.\n' \
+                             f' uses a `comm` of size {test_comm_size} but was launched with size {comm.size}.\n' \
                              f' This generally indicates that `srun` does not interoperate correctly with MPI.'
                 file_path = self._file_path('fatal_error')
                 with open(file_path, "w") as f:

@@ -55,7 +55,7 @@ def pytest_runtestloop(self, session) -> bool:
             assert 0, f'{item.test_info["fatal_error"]}'

         return True
-
+
     @pytest.hookimpl(hookwrapper=True)
     def pytest_runtest_makereport(self, item):
         """

pytest_parallel/send_report.py

+34 −35

@@ -2,12 +2,12 @@
 import socket
 import pickle
 from pathlib import Path
-from .utils.socket import send as socket_send
 from _pytest._code.code import (
     ExceptionChainRepr,
     ReprTraceback,
     ReprEntryNative,
 )
+from .utils.socket import send as socket_send


 parser = argparse.ArgumentParser(description='Send return the codes of the tests to the master pytest_parallel process')

@@ -21,52 +21,51 @@
 args = parser.parse_args()

 def _file_path(when):
-  return Path(f'.pytest_parallel/{args._session_folder}/_partial/{args._test_idx}_{when}')
+    return Path(f'.pytest_parallel/{args._session_folder}/_partial/{args._test_idx}_{when}')

 test_info = {'test_idx': args._test_idx, 'fatal_error': None} # TODO no fatal_error=None (absense means no error)

 # 'fatal_error' file
 file_path = _file_path('fatal_error')
 if file_path.exists():
-  with open(file_path, 'r') as file:
-    fatal_error = file.read()
-  test_info['fatal_error'] = fatal_error
+    with open(file_path, 'r') as file:
+        fatal_error = file.read()
+    test_info['fatal_error'] = fatal_error


 # 'setup/call/teardown' files
 already_failed = False
 for when in ('setup', 'call', 'teardown'):
-  file_path = _file_path(when)
-  if file_path.exists():
-    try:
-      with open(file_path, 'rb') as file:
-        report_info = file.read()
-      report_info = pickle.loads(report_info)
-      test_info[when] = report_info
-    except pickle.PickleError:
-      test_info['fatal_error'] = f'FATAL ERROR in pytest_parallel : unable to decode {file_path}'
-  else: # Supposedly not found because the test crashed before writing the file
-    collect_longrepr = []
-    msg = f'Error: the test crashed. '
-    red = 31
-    bold = 1
-    msg = f'\x1b[{red}m' + f'\x1b[{bold}m' + msg+ '\x1b[0m'
-    msg += f'Log file: {args._test_name}\n'
-    trace_back = ReprTraceback([ReprEntryNative(msg)], None, None)
-    collect_longrepr.append(
-      (trace_back, None, None)
-    )
-    longrepr = ExceptionChainRepr(collect_longrepr)
-
-    outcome = 'passed' if already_failed else 'failed' # No need to report the error twice
-    test_info[when] = {'outcome' : outcome,
-                       'longrepr': longrepr,
-                       'duration': 0, } # unable to report accurately
+    file_path = _file_path(when)
+    if file_path.exists():
+        try:
+            with open(file_path, 'rb') as file:
+                report_info = file.read()
+            report_info = pickle.loads(report_info)
+            test_info[when] = report_info
+        except pickle.PickleError:
+            test_info['fatal_error'] = f'FATAL ERROR in pytest_parallel : unable to decode {file_path}'
+    else: # Supposedly not found because the test crashed before writing the file
+        collect_longrepr = []
+        msg = f'Error: the test crashed. '
+        red = 31
+        bold = 1
+        msg = f'\x1b[{red}m' + f'\x1b[{bold}m' + msg+ '\x1b[0m'
+        msg += f'Log file: {args._test_name}\n'
+        trace_back = ReprTraceback([ReprEntryNative(msg)], None, None)
+        collect_longrepr.append(
+            (trace_back, None, None)
+        )
+        longrepr = ExceptionChainRepr(collect_longrepr)

-  already_failed = True
+        outcome = 'passed' if already_failed else 'failed' # No need to report the error twice
+        test_info[when] = {'outcome' : outcome,
+                           'longrepr': longrepr,
+                           'duration': 0, } # unable to report accurately

+        already_failed = True


-with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
-  s.connect((args._scheduler_ip_address, args._scheduler_port))
-  socket_send(s, pickle.dumps(test_info))
+with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+    s.connect((args._scheduler_ip_address, args._scheduler_port))
+    socket_send(s, pickle.dumps(test_info))
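
For context, the payload assembled above is pickled and pushed to the master pytest_parallel process over TCP. A hypothetical sketch of the receiving end, assuming only the standard library and one message per connection (the real scheduler, and any framing done by `utils.socket.send`, live elsewhere in the package and may differ):

    # Hypothetical receiver: accept one connection, read to EOF, unpickle
    # the test_info dict that send_report.py above transmits.
    import pickle
    import socket

    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
        srv.bind(('127.0.0.1', 0))   # any free port
        srv.listen()
        conn, _ = srv.accept()
        with conn:
            chunks = []
            while data := conn.recv(4096):
                chunks.append(data)
        test_info = pickle.loads(b''.join(chunks))
        print(test_info['test_idx'], test_info.get('fatal_error'))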
