@@ -1,24 +1,12 @@
-import json
 import os
 import sys
 
 import pandas as pd
 
+from redisbench_admin.utils.results import get_key_results_and_values
 from redisbench_admin.utils.utils import retrieve_local_or_remote_input_json
 
 
-def get_key_results_and_values(baseline_json, step, use_result):
-    selected_run = None
-    metrics = {}
-    if "key-results" in baseline_json and use_result in baseline_json["key-results"]:
-        for name, value in baseline_json["key-results"][step][use_result][0].items():
-            if name == "run-name":
-                selected_run = value
-            else:
-                metrics[name] = value
-    return selected_run, metrics
-
-
 def compare_command_logic(args):
     baseline_file = args.baseline_file
     comparison_file = args.comparison_file
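Note: get_key_results_and_values now ships from redisbench_admin.utils.results. A minimal usage sketch, not part of the patch, with a hypothetical baseline JSON whose shape is inferred from the removed body; its guard checks use_result directly under "key-results" while the lookup goes through [step][use_result], so the sample carries both keys:

    from redisbench_admin.utils.results import get_key_results_and_values

    # Hypothetical baseline structure (all names illustrative).
    baseline_json = {
        "key-results": {
            "best-result": [],  # present only to satisfy the membership guard
            "benchmark": {
                "best-result": [
                    {"run-name": "run-3", "ops/sec": 12500.0}
                ]
            },
        }
    }

    run_name, metrics = get_key_results_and_values(baseline_json, "benchmark", "best-result")
    # run_name == "run-3"; metrics == {"ops/sec": 12500.0}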
@@ -82,13 +70,17 @@ def compare_command_logic(args):
         if enabled_fail:
             failing_metrics_serie = df.loc['pct_change'] <= max_negative_pct_change
             failing_metrics = df.loc['pct_change'][failing_metrics_serie]
-            ammount_of_failing_metrics = len (failing_metrics)
+            ammount_of_failing_metrics = len(failing_metrics)
             if ammount_of_failing_metrics > 0:
                 df_keys = df.keys()
-                print( "There was a total of {} metrics that presented a regression above {} %".format(ammount_of_failing_metrics,max_pct_change) )
-                for pos,failed in enumerate(failing_metrics_serie):
+                print("There was a total of {} metrics that presented a regression above {} %".format(
+                    ammount_of_failing_metrics, max_pct_change))
+                for pos, failed in enumerate(failing_metrics_serie):
                     if failed:
-                        print("\tMetric '{}' failed. with an percentage of change of {:.2f} %".format(df_keys[pos],df.loc['pct_change'][pos]))
+                        print("\tMetric '{}' failed. with an percentage of change of {:.2f} %".format(df_keys[pos],
+                                                                                                       df.loc[
+                                                                                                           'pct_change'][
+                                                                                                           pos]))
                 sys.exit(1)
     else:
         print("Skipping step: {} due to command line argument --steps not containing it ({})".format(step, ",".join(
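Note: a standalone sketch, not part of the patch, of the failure gate in the hunk above; the comparison table layout and the threshold value are assumptions:

    import pandas as pd

    # Toy comparison table: one column per metric, with a 'pct_change' row,
    # mirroring the df that compare_command_logic inspects.
    df = pd.DataFrame({"ops/sec": [-7.2], "p50_latency_ms": [1.3]},
                      index=["pct_change"])
    max_negative_pct_change = -5.0  # assumed CLI threshold: fail on a regression worse than 5%

    failing = df.loc["pct_change"] <= max_negative_pct_change
    for metric_name, pct in df.loc["pct_change"][failing].items():
        print("\tMetric '{}' regressed by {:.2f} %".format(metric_name, pct))
    # output: "\tMetric 'ops/sec' regressed by -7.20 %"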
@@ -116,23 +108,3 @@ def generate_comparison_dataframe_configs(benchmark_config, steps):
             step_df_dict[step]["sorting_metric_sorting_direction_map"][metric_name] = False if metric[
                 "comparison"] == "higher-better" else True
     return step_df_dict
-
-
-def from_resultsDF_to_key_results_dict(resultsDataFrame, step, step_df_dict):
-    key_results_dict = {}
-    key_results_dict["table"] = json.loads(resultsDataFrame.to_json(orient='records'))
-    best_result = resultsDataFrame.head(n=1)
-    worst_result = resultsDataFrame.tail(n=1)
-    first_sorting_col = step_df_dict[step]["sorting_metric_names"][0]
-    first_sorting_median = resultsDataFrame[first_sorting_col].median()
-    result_index = resultsDataFrame[first_sorting_col].sub(first_sorting_median).abs().idxmin()
-    median_result = resultsDataFrame.loc[[result_index]]
-    key_results_dict["best-result"] = json.loads(best_result.to_json(orient='records'))
-    key_results_dict["median-result"] = json.loads(
-        median_result.to_json(orient='records'))
-    key_results_dict["worst-result"] = json.loads(worst_result.to_json(orient='records'))
-    key_results_dict["reliability-analysis"] = {
-        'var': json.loads(resultsDataFrame.var().to_json()),
-        'stddev': json.loads(
-            resultsDataFrame.std().to_json())}
-    return key_results_dict
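Note: the removed from_resultsDF_to_key_results_dict picked its "median-result" as the row whose first sorting metric sits closest to that column's median. A standalone sketch of that selection, not part of the patch, with hypothetical data and column names:

    import pandas as pd

    # Hypothetical per-run results, sorted best-to-worst on 'ops/sec'.
    results = pd.DataFrame({
        "run-name": ["run-1", "run-2", "run-3"],
        "ops/sec": [13000.0, 12400.0, 11800.0],
    })

    # The row whose value is closest to the column median becomes the median result.
    median_value = results["ops/sec"].median()
    median_index = results["ops/sec"].sub(median_value).abs().idxmin()
    median_result = results.loc[[median_index]]
    print(median_result.to_json(orient="records"))
    # [{"run-name":"run-2","ops\/sec":12400.0}]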