diff --git a/tests/webapp/api/test_perfcompare_api.py b/tests/webapp/api/test_perfcompare_api.py index 207f808f0bd..0379d64d772 100644 --- a/tests/webapp/api/test_perfcompare_api.py +++ b/tests/webapp/api/test_perfcompare_api.py @@ -39,7 +39,7 @@ def test_perfcompare_results_against_no_base( extra_options = "e10s fission stylo webrender" measurement_unit = "ms" base_application = "firefox" - new_application = "geckoview" + new_application = "firefox" base_sig = create_signature( signature_hash=(20 * "t1"), @@ -56,18 +56,18 @@ def test_perfcompare_results_against_no_base( base_perf_data_values = [32.4] new_perf_data_values = [40.2] - job = perf_jobs[0] - job.push = test_perfcomp_push - job.save() + base_job = perf_jobs[0] + base_job.push = test_perfcomp_push + base_job.save() perf_datum = PerformanceDatum.objects.create( value=base_perf_data_values[0], - push_timestamp=job.push.time, - job=job, - push=job.push, + push_timestamp=base_job.push.time, + job=base_job, + push=base_job.push, repository=try_repository, signature=base_sig, ) - perf_datum.push.time = job.push.time + perf_datum.push.time = base_job.push.time perf_datum.push.save() new_sig = create_signature( @@ -82,86 +82,95 @@ def test_perfcompare_results_against_no_base( application=new_application, ) - job = perf_jobs[1] - job.push = test_perfcomp_push_2 - job.save() + new_job = perf_jobs[1] + new_job.push = test_perfcomp_push_2 + new_job.save() perf_datum = PerformanceDatum.objects.create( value=new_perf_data_values[0], - push_timestamp=job.push.time, - job=job, - push=job.push, - repository=job.repository, + push_timestamp=new_job.push.time, + job=new_job, + push=new_job.push, + repository=new_job.repository, signature=new_sig, ) - perf_datum.push.time = job.push.time + perf_datum.push.time = new_job.push.time perf_datum.push.save() - response = get_expected( + test_perfcomp_push_2_revision = test_perfcomp_push_2.revision + + expected = get_expected( base_sig, new_sig, extra_options, test_option_collection, new_perf_data_values, base_perf_data_values, + None, + test_perfcomp_push_2_revision, + base_job.id, + new_job.id, + try_repository, # base_repository + test_repository, # new_repository + base_sig, # graph_sig ) - expected = [ - { - "base_rev": None, - "new_rev": test_perfcomp_push_2.revision, - "framework_id": base_sig.framework.id, - "platform": base_sig.platform.platform, - "suite": base_sig.suite, - "header_name": response["header_name"], - "base_repository_name": base_sig.repository.name, - "new_repository_name": new_sig.repository.name, - "base_app": "firefox", - "new_app": "geckoview", - "is_complete": response["is_complete"], - "base_measurement_unit": base_sig.measurement_unit, - "new_measurement_unit": new_sig.measurement_unit, - "base_retriggerable_job_ids": [1], - "new_retriggerable_job_ids": [4], - "base_runs": base_perf_data_values, - "new_runs": new_perf_data_values, - "base_runs_replicates": [], - "new_runs_replicates": [], - "base_avg_value": round(response["base_avg_value"], 2), - "new_avg_value": round(response["new_avg_value"], 2), - "base_median_value": round(response["base_median_value"], 2), - "new_median_value": round(response["new_median_value"], 2), - "test": base_sig.test, - "option_name": response["option_name"], - "extra_options": base_sig.extra_options, - "base_stddev": round(response["base_stddev"], 2), - "new_stddev": round(response["new_stddev"], 2), - "base_stddev_pct": round(response["base_stddev_pct"], 2), - "new_stddev_pct": round(response["new_stddev_pct"], 2), - "confidence": 
round(response["confidence"], 2), - "confidence_text": response["confidence_text"], - "delta_value": round(response["delta_value"], 2), - "delta_percentage": round(response["delta_pct"], 2), - "magnitude": round(response["magnitude"], 2), - "new_is_better": response["new_is_better"], - "lower_is_better": response["lower_is_better"], - "is_confident": response["is_confident"], - "more_runs_are_needed": response["more_runs_are_needed"], - "noise_metric": False, - "graphs_link": f"https://treeherder.mozilla.org/perfherder/graphs?" - f"highlightedRevisions={test_perfcomp_push_2.revision}&" - f"series={try_repository.name}%2C{base_sig.signature_hash}%2C1%2C{base_sig.framework.id}&" - f"series={test_repository.name}%2C{base_sig.signature_hash}%2C1%2C{base_sig.framework.id}&" - f"timerange=86400", - "is_improvement": response["is_improvement"], - "is_regression": response["is_regression"], - "is_meaningful": response["is_meaningful"], - "base_parent_signature": response["base_parent_signature"], - "new_parent_signature": response["new_parent_signature"], - "base_signature_id": response["base_signature_id"], - "new_signature_id": response["new_signature_id"], - "has_subtests": response["has_subtests"], - }, - ] + # expected = [ + # { + # "base_rev": None, + # "new_rev": test_perfcomp_push_2.revision, + # "framework_id": base_sig.framework.id, + # "platform": base_sig.platform.platform, + # "suite": base_sig.suite, + # "header_name": response["header_name"], + # "base_repository_name": base_sig.repository.name, + # "new_repository_name": new_sig.repository.name, + # "base_app": base_sig.application, + # "new_app": new_sig.application, + # "is_complete": response["is_complete"], + # "base_measurement_unit": base_sig.measurement_unit, + # "new_measurement_unit": new_sig.measurement_unit, + # "base_retriggerable_job_ids": [base_job.id], + # "new_retriggerable_job_ids": [new_job.id], + # "base_runs": base_perf_data_values, + # "new_runs": new_perf_data_values, + # "base_runs_replicates": [], + # "new_runs_replicates": [], + # "base_avg_value": round(response["base_avg_value"], 2), + # "new_avg_value": round(response["new_avg_value"], 2), + # "base_median_value": round(response["base_median_value"], 2), + # "new_median_value": round(response["new_median_value"], 2), + # "test": base_sig.test, + # "option_name": response["option_name"], + # "extra_options": base_sig.extra_options, + # "base_stddev": round(response["base_stddev"], 2), + # "new_stddev": round(response["new_stddev"], 2), + # "base_stddev_pct": round(response["base_stddev_pct"], 2), + # "new_stddev_pct": round(response["new_stddev_pct"], 2), + # "confidence": round(response["confidence"], 2), + # "confidence_text": response["confidence_text"], + # "delta_value": round(response["delta_value"], 2), + # "delta_percentage": round(response["delta_pct"], 2), + # "magnitude": round(response["magnitude"], 2), + # "new_is_better": response["new_is_better"], + # "lower_is_better": response["lower_is_better"], + # "is_confident": response["is_confident"], + # "more_runs_are_needed": response["more_runs_are_needed"], + # "noise_metric": False, + # "graphs_link": f"https://treeherder.mozilla.org/perfherder/graphs?" 
+ # f"highlightedRevisions={test_perfcomp_push_2.revision}&" + # f"series={try_repository.name}%2C{base_sig.signature_hash}%2C1%2C{base_sig.framework.id}&" + # f"series={test_repository.name}%2C{base_sig.signature_hash}%2C1%2C{base_sig.framework.id}&" + # f"timerange=86400", + # "is_improvement": response["is_improvement"], + # "is_regression": response["is_regression"], + # "is_meaningful": response["is_meaningful"], + # "base_parent_signature": response["base_parent_signature"], + # "new_parent_signature": response["new_parent_signature"], + # "base_signature_id": response["base_signature_id"], + # "new_signature_id": response["new_signature_id"], + # "has_subtests": response["has_subtests"], + # }, + # ] query_params = ( "?base_repository={}&new_repository={}&new_revision={}&framework={" @@ -208,7 +217,7 @@ def test_perfcompare_results_with_only_one_run_and_diff_repo( extra_options = "e10s fission stylo webrender" measurement_unit = "ms" base_application = "firefox" - new_application = "geckoview" + new_application = "firefox" base_sig = create_signature( signature_hash=(20 * "t1"), @@ -225,18 +234,18 @@ def test_perfcompare_results_with_only_one_run_and_diff_repo( base_perf_data_values = [32.4] new_perf_data_values = [40.2] - job = perf_jobs[0] - job.push = test_perfcomp_push - job.save() + base_job = perf_jobs[0] + base_job.push = test_perfcomp_push + base_job.save() perf_datum = PerformanceDatum.objects.create( value=base_perf_data_values[0], - push_timestamp=job.push.time, - job=job, - push=job.push, + push_timestamp=base_job.push.time, + job=base_job, + push=base_job.push, repository=try_repository, signature=base_sig, ) - perf_datum.push.time = job.push.time + perf_datum.push.time = base_job.push.time perf_datum.push.save() new_sig = create_signature( @@ -251,86 +260,96 @@ def test_perfcompare_results_with_only_one_run_and_diff_repo( application=new_application, ) - job = perf_jobs[1] - job.push = test_perfcomp_push_2 - job.save() + new_job = perf_jobs[1] + new_job.push = test_perfcomp_push_2 + new_job.save() perf_datum = PerformanceDatum.objects.create( value=new_perf_data_values[0], - push_timestamp=job.push.time, - job=job, - push=job.push, - repository=job.repository, + push_timestamp=new_job.push.time, + job=new_job, + push=new_job.push, + repository=new_job.repository, signature=new_sig, ) - perf_datum.push.time = job.push.time + perf_datum.push.time = new_job.push.time perf_datum.push.save() - response = get_expected( + test_perfcomp_push_revision = test_perfcomp_push.revision + test_perfcomp_push_2_revision = test_perfcomp_push_2.revision + + expected = get_expected( base_sig, new_sig, extra_options, test_option_collection, new_perf_data_values, base_perf_data_values, + test_perfcomp_push_revision, + test_perfcomp_push_2_revision, + base_job.id, + new_job.id, + try_repository, # base_repository + test_repository, # new_repository + base_sig, # graph_sig ) - expected = [ - { - "base_rev": test_perfcomp_push.revision, - "new_rev": test_perfcomp_push_2.revision, - "framework_id": base_sig.framework.id, - "platform": base_sig.platform.platform, - "suite": base_sig.suite, - "header_name": response["header_name"], - "base_repository_name": base_sig.repository.name, - "new_repository_name": new_sig.repository.name, - "base_app": "firefox", - "new_app": "geckoview", - "is_complete": response["is_complete"], - "base_measurement_unit": base_sig.measurement_unit, - "new_measurement_unit": new_sig.measurement_unit, - "base_retriggerable_job_ids": [1], - "new_retriggerable_job_ids": 
[4], - "base_runs": base_perf_data_values, - "new_runs": new_perf_data_values, - "base_runs_replicates": [], - "new_runs_replicates": [], - "base_avg_value": round(response["base_avg_value"], 2), - "new_avg_value": round(response["new_avg_value"], 2), - "base_median_value": round(response["base_median_value"], 2), - "new_median_value": round(response["new_median_value"], 2), - "test": base_sig.test, - "option_name": response["option_name"], - "extra_options": base_sig.extra_options, - "base_stddev": round(response["base_stddev"], 2), - "new_stddev": round(response["new_stddev"], 2), - "base_stddev_pct": round(response["base_stddev_pct"], 2), - "new_stddev_pct": round(response["new_stddev_pct"], 2), - "confidence": round(response["confidence"], 2), - "confidence_text": response["confidence_text"], - "delta_value": round(response["delta_value"], 2), - "delta_percentage": round(response["delta_pct"], 2), - "magnitude": round(response["magnitude"], 2), - "new_is_better": response["new_is_better"], - "lower_is_better": response["lower_is_better"], - "is_confident": response["is_confident"], - "more_runs_are_needed": response["more_runs_are_needed"], - "noise_metric": False, - "graphs_link": f"https://treeherder.mozilla.org/perfherder/graphs?highlightedRevisions={test_perfcomp_push.revision}&" - f"highlightedRevisions={test_perfcomp_push_2.revision}&" - f"series={try_repository.name}%2C{base_sig.signature_hash}%2C1%2C{base_sig.framework.id}&" - f"series={test_repository.name}%2C{base_sig.signature_hash}%2C1%2C{base_sig.framework.id}&" - f"timerange=604800", - "is_improvement": response["is_improvement"], - "is_regression": response["is_regression"], - "is_meaningful": response["is_meaningful"], - "base_parent_signature": response["base_parent_signature"], - "new_parent_signature": response["new_parent_signature"], - "base_signature_id": response["base_signature_id"], - "new_signature_id": response["new_signature_id"], - "has_subtests": response["has_subtests"], - }, - ] + # expected = [ + # { + # "base_rev": test_perfcomp_push.revision, + # "new_rev": test_perfcomp_push_2.revision, + # "framework_id": base_sig.framework.id, + # "platform": base_sig.platform.platform, + # "suite": base_sig.suite, + # "header_name": response["header_name"], + # "base_repository_name": base_sig.repository.name, + # "new_repository_name": new_sig.repository.name, + # "base_app": base_sig.application, + # "new_app": new_sig.application, + # "is_complete": response["is_complete"], + # "base_measurement_unit": base_sig.measurement_unit, + # "new_measurement_unit": new_sig.measurement_unit, + # "base_retriggerable_job_ids": [base_job.id], + # "new_retriggerable_job_ids": [new_job.id], + # "base_runs": base_perf_data_values, + # "new_runs": new_perf_data_values, + # "base_runs_replicates": [], + # "new_runs_replicates": [], + # "base_avg_value": round(response["base_avg_value"], 2), + # "new_avg_value": round(response["new_avg_value"], 2), + # "base_median_value": round(response["base_median_value"], 2), + # "new_median_value": round(response["new_median_value"], 2), + # "test": base_sig.test, + # "option_name": response["option_name"], + # "extra_options": base_sig.extra_options, + # "base_stddev": round(response["base_stddev"], 2), + # "new_stddev": round(response["new_stddev"], 2), + # "base_stddev_pct": round(response["base_stddev_pct"], 2), + # "new_stddev_pct": round(response["new_stddev_pct"], 2), + # "confidence": round(response["confidence"], 2), + # "confidence_text": response["confidence_text"], + # 
"delta_value": round(response["delta_value"], 2), + # "delta_percentage": round(response["delta_pct"], 2), + # "magnitude": round(response["magnitude"], 2), + # "new_is_better": response["new_is_better"], + # "lower_is_better": response["lower_is_better"], + # "is_confident": response["is_confident"], + # "more_runs_are_needed": response["more_runs_are_needed"], + # "noise_metric": False, + # "graphs_link": f"https://treeherder.mozilla.org/perfherder/graphs?highlightedRevisions={test_perfcomp_push.revision}&" + # f"highlightedRevisions={test_perfcomp_push_2.revision}&" + # f"series={try_repository.name}%2C{base_sig.signature_hash}%2C1%2C{base_sig.framework.id}&" + # f"series={test_repository.name}%2C{base_sig.signature_hash}%2C1%2C{base_sig.framework.id}&" + # f"timerange=604800", + # "is_improvement": response["is_improvement"], + # "is_regression": response["is_regression"], + # "is_meaningful": response["is_meaningful"], + # "base_parent_signature": response["base_parent_signature"], + # "new_parent_signature": response["new_parent_signature"], + # "base_signature_id": response["base_signature_id"], + # "new_signature_id": response["new_signature_id"], + # "has_subtests": response["has_subtests"], + # }, + # ] query_params = ( "?base_repository={}&new_repository={}&base_revision={}&new_revision={}&framework={" @@ -391,86 +410,96 @@ def test_perfcompare_results_without_base_signature( application=new_application, ) - job = perf_jobs[1] - job.push = test_perfcomp_push_2 - job.save() + new_job = perf_jobs[1] + new_job.push = test_perfcomp_push_2 + new_job.save() perf_datum = PerformanceDatum.objects.create( value=new_perf_data_values[0], - push_timestamp=job.push.time, - job=job, - push=job.push, - repository=job.repository, + push_timestamp=new_job.push.time, + job=new_job, + push=new_job.push, + repository=new_job.repository, signature=new_sig, ) - perf_datum.push.time = job.push.time + perf_datum.push.time = new_job.push.time perf_datum.push.save() - response = get_expected( + test_perfcomp_push_revision = test_perfcomp_push.revision + test_perfcomp_push_2_revision = test_perfcomp_push_2.revision + + expected = get_expected( None, new_sig, extra_options, test_option_collection, new_perf_data_values, [], + test_perfcomp_push_revision, + test_perfcomp_push_2_revision, + None, + new_job.id, + try_repository, # base_repository + test_repository, # new_repository + new_sig, # graph_sig_hash ) - expected = [ - { - "base_rev": test_perfcomp_push.revision, - "new_rev": test_perfcomp_push_2.revision, - "framework_id": new_sig.framework.id, - "platform": new_sig.platform.platform, - "suite": new_sig.suite, - "header_name": response["header_name"], - "base_repository_name": try_repository.name, - "new_repository_name": new_sig.repository.name, - "base_app": "", - "new_app": "geckoview", - "is_complete": False, - "base_measurement_unit": "", - "new_measurement_unit": new_sig.measurement_unit, - "base_retriggerable_job_ids": [], - "new_retriggerable_job_ids": [job.id], - "base_runs": [], - "new_runs": new_perf_data_values, - "base_runs_replicates": [], - "new_runs_replicates": [], - "base_avg_value": round(response["base_avg_value"], 2), - "new_avg_value": round(response["new_avg_value"], 2), - "base_median_value": round(response["base_median_value"], 2), - "new_median_value": round(response["new_median_value"], 2), - "test": new_sig.test, - "option_name": response["option_name"], - "extra_options": new_sig.extra_options, - "base_stddev": round(response["base_stddev"], 2), - "new_stddev": 
round(response["new_stddev"], 2), - "base_stddev_pct": round(response["base_stddev_pct"], 2), - "new_stddev_pct": round(response["new_stddev_pct"], 2), - "confidence": round(response["confidence"], 2), - "confidence_text": response["confidence_text"], - "delta_value": round(response["delta_value"], 2), - "delta_percentage": round(response["delta_pct"], 2), - "magnitude": round(response["magnitude"], 2), - "new_is_better": response["new_is_better"], - "lower_is_better": response["lower_is_better"], - "is_confident": response["is_confident"], - "more_runs_are_needed": False, - "noise_metric": False, - "graphs_link": f"https://treeherder.mozilla.org/perfherder/graphs?highlightedRevisions={test_perfcomp_push.revision}&" - f"highlightedRevisions={test_perfcomp_push_2.revision}&" - f"series={try_repository.name}%2C{new_sig.signature_hash}%2C1%2C{new_sig.framework.id}&" - f"series={test_repository.name}%2C{new_sig.signature_hash}%2C1%2C{new_sig.framework.id}&" - f"timerange=604800", - "is_improvement": response["is_improvement"], - "is_regression": response["is_regression"], - "is_meaningful": response["is_meaningful"], - "base_parent_signature": response["base_parent_signature"], - "new_parent_signature": response["new_parent_signature"], - "base_signature_id": response["base_signature_id"], - "new_signature_id": response["new_signature_id"], - "has_subtests": response["has_subtests"], - }, - ] + # expected = [ + # { + # "base_rev": test_perfcomp_push.revision, + # "new_rev": test_perfcomp_push_2.revision, + # "framework_id": new_sig.framework.id, + # "platform": new_sig.platform.platform, + # "suite": new_sig.suite, + # "header_name": response["header_name"], + # "base_repository_name": try_repository.name, + # "new_repository_name": new_sig.repository.name, + # "base_app": "", + # "new_app": "geckoview", + # "is_complete": False, + # "base_measurement_unit": "", + # "new_measurement_unit": new_sig.measurement_unit, + # "base_retriggerable_job_ids": [], + # "new_retriggerable_job_ids": [job.id], + # "base_runs": [], + # "new_runs": new_perf_data_values, + # "base_runs_replicates": [], + # "new_runs_replicates": [], + # "base_avg_value": round(response["base_avg_value"], 2), + # "new_avg_value": round(response["new_avg_value"], 2), + # "base_median_value": round(response["base_median_value"], 2), + # "new_median_value": round(response["new_median_value"], 2), + # "test": new_sig.test, + # "option_name": response["option_name"], + # "extra_options": new_sig.extra_options, + # "base_stddev": round(response["base_stddev"], 2), + # "new_stddev": round(response["new_stddev"], 2), + # "base_stddev_pct": round(response["base_stddev_pct"], 2), + # "new_stddev_pct": round(response["new_stddev_pct"], 2), + # "confidence": round(response["confidence"], 2), + # "confidence_text": response["confidence_text"], + # "delta_value": round(response["delta_value"], 2), + # "delta_percentage": round(response["delta_pct"], 2), + # "magnitude": round(response["magnitude"], 2), + # "new_is_better": response["new_is_better"], + # "lower_is_better": response["lower_is_better"], + # "is_confident": response["is_confident"], + # "more_runs_are_needed": False, + # "noise_metric": False, + # "graphs_link": f"https://treeherder.mozilla.org/perfherder/graphs?highlightedRevisions={test_perfcomp_push.revision}&" + # f"highlightedRevisions={test_perfcomp_push_2.revision}&" + # f"series={try_repository.name}%2C{new_sig.signature_hash}%2C1%2C{new_sig.framework.id}&" + # 
f"series={test_repository.name}%2C{new_sig.signature_hash}%2C1%2C{new_sig.framework.id}&" + # f"timerange=604800", + # "is_improvement": response["is_improvement"], + # "is_regression": response["is_regression"], + # "is_meaningful": response["is_meaningful"], + # "base_parent_signature": response["base_parent_signature"], + # "new_parent_signature": response["new_parent_signature"], + # "base_signature_id": response["base_signature_id"], + # "new_signature_id": response["new_signature_id"], + # "has_subtests": response["has_subtests"], + # }, + # ] query_params = ( "?base_repository={}&new_repository={}&base_revision={}&new_revision={}&framework={" @@ -516,7 +545,7 @@ def test_perfcompare_results_subtests_support( extra_options = "e10s fission stylo webrender" measurement_unit = "ms" base_application = "firefox" - new_application = "geckoview" + new_application = "firefox" base_sig = create_signature( signature_hash=(20 * "t1"), @@ -596,8 +625,8 @@ def test_perfcompare_results_subtests_support( "header_name": response["header_name"], "base_repository_name": base_sig.repository.name, "new_repository_name": new_sig.repository.name, - "base_app": "firefox", - "new_app": "geckoview", + "base_app": base_sig.application, + "new_app": new_sig.application, "is_complete": response["is_complete"], "base_measurement_unit": base_sig.measurement_unit, "new_measurement_unit": new_sig.measurement_unit, @@ -935,12 +964,19 @@ def get_expected( test_option_collection, new_perf_data_values, base_perf_data_values, + test_perfcomp_push_revision, + test_perfcomp_push_2_revision, + base_job_id, + new_job_id, + base_repository, + new_repository, + graph_sig, ): sig = base_sig if base_sig else new_sig response = {"option_name": test_option_collection.get(sig.option_collection_id, "")} test_suite = perfcompare_utils.get_test_suite(sig.suite, sig.test) response["header_name"] = perfcompare_utils.get_header_name( - extra_options, response["option_name"], test_suite + extra_options, response["option_name"], test_suite, sig.application ) response["base_avg_value"] = perfcompare_utils.get_avg( base_perf_data_values, response["header_name"] @@ -951,7 +987,7 @@ def get_expected( response["base_median_value"] = perfcompare_utils.get_median(base_perf_data_values) response["new_median_value"] = perfcompare_utils.get_median(new_perf_data_values) response["delta_value"] = perfcompare_utils.get_delta_value( - response["new_avg_value"], response.get("base_avg_value") + response["new_avg_value"], response["base_avg_value"] ) response["delta_pct"] = perfcompare_utils.get_delta_percentage( response["delta_value"], response["base_avg_value"] @@ -1004,4 +1040,63 @@ def get_expected( response["has_subtests"] = (base_sig.has_subtests if base_sig else False) or ( new_sig.has_subtests if new_sig else False ) + + response = ( + { + "base_rev": test_perfcomp_push_revision, + "new_rev": test_perfcomp_push_2_revision, + "framework_id": sig.framework.id, + "platform": sig.platform.platform, + "suite": sig.suite, + "header_name": response["header_name"], + "base_repository_name": base_repository.name, + "new_repository_name": new_repository.name, + "base_app": base_sig.application if base_sig else "", + "new_app": new_sig.application, + "is_complete": response["is_complete"], + "base_measurement_unit": base_sig.measurement_unit if base_sig else "", + "new_measurement_unit": new_sig.measurement_unit, + "base_retriggerable_job_ids": [base_job_id] if base_job_id else [], + "new_retriggerable_job_ids": [new_job_id], + "base_runs": 
base_perf_data_values, + "new_runs": new_perf_data_values, + "base_runs_replicates": [], + "new_runs_replicates": [], + "base_avg_value": round(response["base_avg_value"], 2), + "new_avg_value": round(response["new_avg_value"], 2), + "base_median_value": round(response["base_median_value"], 2), + "new_median_value": round(response["new_median_value"], 2), + "test": sig.test, + "option_name": response["option_name"], + "extra_options": sig.extra_options, + "base_stddev": round(response["base_stddev"], 2), + "new_stddev": round(response["new_stddev"], 2), + "base_stddev_pct": round(response["base_stddev_pct"], 2), + "new_stddev_pct": round(response["new_stddev_pct"], 2), + "confidence": round(response["confidence"], 2), + "confidence_text": response["confidence_text"], + "delta_value": round(response["delta_value"], 2), + "delta_percentage": round(response["delta_pct"], 2), + "magnitude": round(response["magnitude"], 2), + "new_is_better": response["new_is_better"], + "lower_is_better": response["lower_is_better"], + "is_confident": response["is_confident"], + "more_runs_are_needed": response["more_runs_are_needed"], + "noise_metric": False, + "graphs_link": f"https://treeherder.mozilla.org/perfherder/graphs?highlightedRevisions={test_perfcomp_push_revision}&" + f"highlightedRevisions={test_perfcomp_push_2_revision}&" + f"series={base_repository.name}%2C{graph_sig.signature_hash}%2C1%2C{graph_sig.framework.id}&" + f"series={new_repository.name}%2C{graph_sig.signature_hash}%2C1%2C{graph_sig.framework.id}&" + f"timerange=604800", + "is_improvement": response["is_improvement"], + "is_regression": response["is_regression"], + "is_meaningful": response["is_meaningful"], + "base_parent_signature": response["base_parent_signature"], + "new_parent_signature": response["new_parent_signature"], + "base_signature_id": response["base_signature_id"], + "new_signature_id": response["new_signature_id"], + "has_subtests": response["has_subtests"], + }, + ) + return response diff --git a/treeherder/webapp/api/perfcompare_utils.py b/treeherder/webapp/api/perfcompare_utils.py index 58a9bfbe674..e3e68dd64ca 100644 --- a/treeherder/webapp/api/perfcompare_utils.py +++ b/treeherder/webapp/api/perfcompare_utils.py @@ -32,8 +32,8 @@ def get_test_suite(suite, test): return suite if test == "" or test == suite else f"{suite} {test}" -def get_header_name(extra_options, option_name, test_suite): - name = f"{test_suite} {option_name} {extra_options}" +def get_header_name(extra_options, option_name, test_suite, application): + name = f"{test_suite} {option_name} {extra_options} {application}" return name diff --git a/treeherder/webapp/api/performance_data.py b/treeherder/webapp/api/performance_data.py index de79a1d50d0..e3caa2c1b67 100644 --- a/treeherder/webapp/api/performance_data.py +++ b/treeherder/webapp/api/performance_data.py @@ -892,7 +892,7 @@ def list(self, request): base_push = models.Push.objects.get( revision=base_rev, repository__name=base_repo_name ) - # Dynamically calculate an time interval based on the base and new push + # Dynamically calculate a time interval based on the base and new push interval = self._get_interval(base_push, new_push) else: # Comparing without a base needs a timerange from which to gather the data needed @@ -1261,10 +1261,13 @@ def _get_signatures_map(signatures, grouped_values, option_collection_map): suite = signature["suite"] test = signature["test"] extra_options = signature["extra_options"] + application = signature["application"] option_name = 
option_collection_map[signature["option_collection_id"]] test_suite = perfcompare_utils.get_test_suite(suite, test) platform = signature["platform__platform"] - header = perfcompare_utils.get_header_name(extra_options, option_name, test_suite) + header = perfcompare_utils.get_header_name( + extra_options, option_name, test_suite, application + ) sig_identifier = perfcompare_utils.get_sig_identifier(header, platform) if sig_identifier not in signatures_map or (
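For context, a minimal sketch of the behaviour the updated helpers now exercise, using perfcompare_utils as changed above (the suite, test, and option names below are illustrative placeholders, not values taken from the test fixtures):

    from treeherder.webapp.api import perfcompare_utils

    # Illustrative inputs; the real tests derive these from the signature fixtures.
    suite, test = "a11yr", "dhtml.html"
    option_name = "opt"
    extra_options = "e10s fission stylo webrender"

    test_suite = perfcompare_utils.get_test_suite(suite, test)  # "a11yr dhtml.html"

    # get_header_name now appends the application, so Firefox and GeckoView
    # runs of the same suite/options no longer share a comparison header.
    firefox_header = perfcompare_utils.get_header_name(
        extra_options, option_name, test_suite, "firefox"
    )
    geckoview_header = perfcompare_utils.get_header_name(
        extra_options, option_name, test_suite, "geckoview"
    )

    assert firefox_header == "a11yr dhtml.html opt e10s fission stylo webrender firefox"
    assert firefox_header != geckoview_header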