@@ -4,7 +4,7 @@
 from django.urls import reverse
 
 from treeherder.model.models import Job
-from treeherder.perf.models import PerformanceDatum
+from treeherder.perf.models import PerformanceDatum, PerformanceDatumReplicate
 from treeherder.webapp.api import perfcompare_utils
 
 NOW = datetime.datetime.now()
@@ -224,6 +224,8 @@ def test_perfcompare_results_with_only_one_run_and_diff_repo(
 
     base_perf_data_values = [32.4]
     new_perf_data_values = [40.2]
+    base_perf_data_replicates = [20]
+    new_perf_data_replicates = [15]
 
     job = perf_jobs[0]
     job.push = test_perfcomp_push
@@ -238,6 +240,7 @@ def test_perfcompare_results_with_only_one_run_and_diff_repo(
     )
     perf_datum.push.time = job.push.time
     perf_datum.push.save()
+    PerformanceDatumReplicate.objects.create(performance_datum=perf_datum, value=20)
 
     new_sig = create_signature(
         signature_hash=(20 * "t2"),
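For context on the new model usage: each `PerformanceDatumReplicate` row stores one raw measurement and points back to its parent `PerformanceDatum`. This test attaches a single replicate per datum; attaching several at once could look like the following sketch, where `add_replicates` is a hypothetical helper and only the `performance_datum` and `value` fields used above are assumed:

```python
from treeherder.perf.models import PerformanceDatum, PerformanceDatumReplicate


def add_replicates(perf_datum: PerformanceDatum, values):
    # One PerformanceDatumReplicate row per raw measurement;
    # bulk_create inserts them in a single query.
    PerformanceDatumReplicate.objects.bulk_create(
        [PerformanceDatumReplicate(performance_datum=perf_datum, value=v) for v in values]
    )
```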
@@ -264,6 +267,7 @@ def test_perfcompare_results_with_only_one_run_and_diff_repo(
     )
     perf_datum.push.time = job.push.time
     perf_datum.push.save()
+    PerformanceDatumReplicate.objects.create(performance_datum=perf_datum, value=15)
 
     response = get_expected(
         base_sig,
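On the read side, serving `replicates=true` presumably means expanding each data point into its stored replicates rather than its summary value. A minimal sketch of that lookup, assuming only the foreign key shown above; the helper name and queryset shape are not taken from the actual endpoint code:

```python
from treeherder.perf.models import PerformanceDatumReplicate


def replicate_values_for(datum_ids):
    # Flat list of raw replicate measurements for the given PerformanceDatum ids.
    return list(
        PerformanceDatumReplicate.objects.filter(
            performance_datum_id__in=datum_ids
        ).values_list("value", flat=True)
    )
```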
@@ -293,8 +297,8 @@ def test_perfcompare_results_with_only_one_run_and_diff_repo(
         "new_retriggerable_job_ids": [10],
         "base_runs": base_perf_data_values,
         "new_runs": new_perf_data_values,
-        "base_runs_replicates": [],
-        "new_runs_replicates": [],
+        "base_runs_replicates": [20],
+        "new_runs_replicates": [15],
         "base_avg_value": round(response["base_avg_value"], 2),
         "new_avg_value": round(response["new_avg_value"], 2),
         "base_median_value": round(response["base_median_value"], 2),
@@ -348,6 +352,38 @@ def test_perfcompare_results_with_only_one_run_and_diff_repo(
     assert response.status_code == 200
     assert expected[0] == response.json()[0]
 
+    # Run the same comparison again, this time with replicates=true
+    query_params = (
+        "?base_repository={}&new_repository={}&base_revision={}&new_revision={}&framework={"
+        "}&no_subtests=true&replicates=true".format(
+            try_repository.name,
+            test_repository.name,
+            test_perfcomp_push.revision,
+            test_perfcomp_push_2.revision,
+            test_perf_signature.framework_id,
+        )
+    )
+
+    expected_response = get_expected(
+        base_sig,
+        new_sig,
+        extra_options,
+        test_option_collection,
+        # compute the expected response from the replicate values
+        new_perf_data_replicates,
+        base_perf_data_replicates,
+    )
+    response = client.get(reverse("perfcompare-results") + query_params)
+
+    # verify that the response statistics are now based on the replicate values
+    assert response.status_code == 200
+    assert response.json()[0]["base_avg_value"] == round(expected_response["base_avg_value"], 2)
+    assert response.json()[0]["base_stddev"] == round(expected_response["base_stddev"], 2)
+    assert response.json()[0]["base_median_value"] == round(expected_response["base_median_value"], 2)
+    assert response.json()[0]["new_avg_value"] == round(expected_response["new_avg_value"], 2)
+    assert response.json()[0]["new_stddev"] == round(expected_response["new_stddev"], 2)
+    assert response.json()[0]["new_median_value"] == round(expected_response["new_median_value"], 2)
+
 
 def test_perfcompare_results_without_base_signature(
     client,
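What the second comparison exercises: with `replicates=true`, the avg/stddev/median in the response are derived from the replicate lists (`[20]` and `[15]`) rather than the per-run summary values (`[32.4]` and `[40.2]`). A standalone illustration using Python's `statistics` module; the real numbers come from `get_expected`/`perfcompare_utils`, and treating a single-value run as stddev 0 is an assumption of this sketch:

```python
import statistics


def summarize(values):
    # stdev() needs at least two data points; fall back to 0.0 for one value
    # (an assumption for this sketch, not necessarily perfcompare_utils behavior).
    stddev = statistics.stdev(values) if len(values) > 1 else 0.0
    return {
        "avg": round(statistics.mean(values), 2),
        "median": round(statistics.median(values), 2),
        "stddev": round(stddev, 2),
    }


print(summarize([32.4]))  # summary values   -> {'avg': 32.4, 'median': 32.4, 'stddev': 0.0}
print(summarize([20]))    # replicate values -> {'avg': 20, 'median': 20, 'stddev': 0.0}
```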