@@ -5,7 +5,7 @@
 import dask
 import pytest
 from dask.distributed import Client, wait
-from distributed.utils_test import loop, captured_logger  # noqa: F401
+from distributed.utils_test import captured_logger
 from dask.utils import tmpfile
 
 from dask_kubernetes import KubeCluster, make_pod_spec
@@ -75,17 +75,17 @@ def test_ipython_display(cluster):
         sleep(0.5)
 
 
-def test_env(pod_spec, loop):
-    with KubeCluster(pod_spec, env={"ABC": "DEF"}, loop=loop) as cluster:
+def test_env(pod_spec):
+    with KubeCluster(pod_spec, env={"ABC": "DEF"}) as cluster:
         cluster.scale(1)
-        with Client(cluster, loop=loop) as client:
+        with Client(cluster) as client:
             while not cluster.scheduler_info["workers"]:
                 sleep(0.1)
             env = client.run(lambda: dict(os.environ))
             assert all(v["ABC"] == "DEF" for v in env.values())
 
 
-def dont_test_pod_template_yaml(docker_image, loop):
+def dont_test_pod_template_yaml(docker_image):
     test_yaml = {
         "kind": "Pod",
         "metadata": {"labels": {"app": "dask", "component": "dask-worker"}},
@@ -109,9 +109,9 @@ def dont_test_pod_template_yaml(docker_image, loop):
     with tmpfile(extension="yaml") as fn:
         with open(fn, mode="w") as f:
             yaml.dump(test_yaml, f)
-        with KubeCluster(f.name, loop=loop) as cluster:
+        with KubeCluster(f.name) as cluster:
             cluster.scale(2)
-            with Client(cluster, loop=loop) as client:
+            with Client(cluster) as client:
                 future = client.submit(lambda x: x + 1, 10)
                 result = future.result(timeout=10)
                 assert result == 11
@@ -128,7 +128,7 @@ def dont_test_pod_template_yaml(docker_image, loop):
                 assert all(client.has_what().values())
 
 
-def test_pod_template_yaml_expand_env_vars(docker_image, loop):
+def test_pod_template_yaml_expand_env_vars(docker_image):
     try:
         os.environ["FOO_IMAGE"] = docker_image
 
@@ -155,13 +155,13 @@ def test_pod_template_yaml_expand_env_vars(docker_image, loop):
         with tmpfile(extension="yaml") as fn:
             with open(fn, mode="w") as f:
                 yaml.dump(test_yaml, f)
-            with KubeCluster(f.name, loop=loop) as cluster:
+            with KubeCluster(f.name) as cluster:
                 assert cluster.pod_template.spec.containers[0].image == docker_image
     finally:
         del os.environ["FOO_IMAGE"]
 
 
-def test_pod_template_dict(docker_image, loop):
+def test_pod_template_dict(docker_image):
     spec = {
         "metadata": {},
         "restartPolicy": "Never",
@@ -185,9 +185,9 @@ def test_pod_template_dict(docker_image, loop):
         },
     }
 
-    with KubeCluster(spec, loop=loop) as cluster:
+    with KubeCluster(spec) as cluster:
         cluster.scale(2)
-        with Client(cluster, loop=loop) as client:
+        with Client(cluster) as client:
             future = client.submit(lambda x: x + 1, 10)
             result = future.result()
             assert result == 11
@@ -202,7 +202,7 @@ def test_pod_template_dict(docker_image, loop):
             assert all(client.has_what().values())
 
 
-def test_pod_template_minimal_dict(docker_image, loop):
+def test_pod_template_minimal_dict(docker_image):
     spec = {
         "spec": {
             "containers": [
@@ -224,9 +224,9 @@ def test_pod_template_minimal_dict(docker_image, loop):
         }
     }
 
-    with KubeCluster(spec, loop=loop) as cluster:
+    with KubeCluster(spec) as cluster:
         cluster.adapt()
-        with Client(cluster, loop=loop) as client:
+        with Client(cluster) as client:
             future = client.submit(lambda x: x + 1, 10)
             result = future.result()
             assert result == 11
@@ -264,9 +264,9 @@ def test_bad_args():
         KubeCluster({"kind": "Pod"})
 
 
-def test_constructor_parameters(pod_spec, loop):
+def test_constructor_parameters(pod_spec):
     env = {"FOO": "BAR", "A": 1}
-    with KubeCluster(pod_spec, name="myname", loop=loop, env=env) as cluster:
+    with KubeCluster(pod_spec, name="myname", env=env) as cluster:
         pod = cluster.pod_template
 
         var = [v for v in pod.spec.containers[0].env if v.name == "FOO"]
@@ -380,15 +380,14 @@ def test_maximum(cluster):
         assert "scale beyond maximum number of workers" in result.lower()
 
 
-def test_extra_pod_config(docker_image, loop):
+def test_extra_pod_config(docker_image):
     """
     Test that our pod config merging process works fine
     """
     with KubeCluster(
         make_pod_spec(
             docker_image, extra_pod_config={"automountServiceAccountToken": False}
         ),
-        loop=loop,
         n_workers=0,
     ) as cluster:
 
@@ -397,7 +396,7 @@ def test_extra_pod_config(docker_image, loop):
         assert pod.spec.automount_service_account_token is False
 
 
-def test_extra_container_config(docker_image, loop):
+def test_extra_container_config(docker_image):
     """
     Test that our container config merging process works fine
     """
@@ -409,7 +408,6 @@ def test_extra_container_config(docker_image, loop):
                 "securityContext": {"runAsUser": 0},
             },
         ),
-        loop=loop,
         n_workers=0,
     ) as cluster:
 
@@ -419,15 +417,14 @@ def test_extra_container_config(docker_image, loop):
         assert pod.spec.containers[0].security_context == {"runAsUser": 0}
 
 
-def test_container_resources_config(docker_image, loop):
+def test_container_resources_config(docker_image):
     """
    Test container resource requests / limits being set properly
     """
     with KubeCluster(
         make_pod_spec(
             docker_image, memory_request="0.5G", memory_limit="1G", cpu_limit="1"
         ),
-        loop=loop,
         n_workers=0,
     ) as cluster:
 
@@ -439,7 +436,7 @@ def test_container_resources_config(docker_image, loop):
         assert "cpu" not in pod.spec.containers[0].resources.requests
 
 
-def test_extra_container_config_merge(docker_image, loop):
+def test_extra_container_config_merge(docker_image):
     """
     Test that our container config merging process works recursively fine
     """
@@ -452,7 +449,6 @@ def test_extra_container_config_merge(docker_image, loop):
                 "args": ["last-item"],
             },
         ),
-        loop=loop,
         n_workers=0,
     ) as cluster:
 
@@ -464,7 +460,7 @@ def test_extra_container_config_merge(docker_image, loop):
         assert pod.spec.containers[0].args[-1] == "last-item"
 
 
-def test_worker_args(docker_image, loop):
+def test_worker_args(docker_image):
     """
     Test that dask-worker arguments are added to the container args
     """
@@ -474,7 +470,6 @@ def test_worker_args(docker_image, loop):
            memory_limit="5000M",
             resources="FOO=1 BAR=2",
         ),
-        loop=loop,
         n_workers=0,
     ) as cluster:
 
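
For context, the usage pattern these tests converge on can be sketched as follows. This is a minimal illustration, not code from the diff: the image tag and timeout are assumed placeholders, and running it requires a reachable Kubernetes cluster plus a worker image with dask installed.

from dask.distributed import Client
from dask_kubernetes import KubeCluster, make_pod_spec

# Build a worker pod spec; the image tag here is a placeholder.
pod_spec = make_pod_spec(
    image="daskdev/dask:latest",
    memory_limit="1G",
    cpu_limit="1",
)

# No loop= keyword: KubeCluster and Client now manage their own event loop.
with KubeCluster(pod_spec, n_workers=0) as cluster:
    cluster.scale(1)
    with Client(cluster) as client:
        assert client.submit(lambda x: x + 1, 10).result(timeout=60) == 11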