
Commit 3faadb7

Support for different benchmark tooling (previously only ftsb_redisearch). (#3)

* [wip] wip on ci integration
* [wip] wip on ci
* [wip] wip on poetry+ci
* [add] adding coverage report
* [add] added badges for CI and Coverage
* [add] added release drafter automation
* [add] enabled different benchmark tools on run. extended testing. kicked off results exporter
1 parent bfdd6d1 commit 3faadb7

12 files changed: +577 -110 lines

.github/workflows/poetry-pytest.yml

+40 -40
@@ -12,43 +12,43 @@ jobs:
     name: pytest
     runs-on: ubuntu-latest
     steps:
(lines 15-54 were removed and re-added with identical text; as rendered, this is an indentation-only change — the resulting block:)
      - uses: actions/checkout@master
      - name: Set up Python 3.7
        uses: actions/setup-python@v1
        with:
          python-version: 3.7

      - name: Install Poetry
        uses: dschep/[email protected]

      - name: Cache Poetry virtualenv
        uses: actions/cache@v1
        id: cache
        with:
          path: ~/.virtualenvs
          key: poetry-${{ hashFiles('**/poetry.lock') }}
          restore-keys: |
            poetry-${{ hashFiles('**/poetry.lock') }}

      - name: Set Poetry config
        run: |
          poetry config virtualenvs.in-project false
          poetry config virtualenvs.path ~/.virtualenvs

      - name: Install Dependencies
        run: poetry install
        if: steps.cache.outputs.cache-hit != 'true'

      - name: Test with pytest
        env:
          DJANGO_SETTINGS_MODULE: project.settings
          SECRETS_FILE: .confidential/ci.json
        run: poetry run pytest --cov redisbench_admin

      - name: Upload coverage
        run: poetry run codecov -t ${{ secrets.CODECOV_TOKEN }}

      - name: Publish to PyPI
        if: github.event_name == 'release'
        run: |
          poetry publish -u __token__ -p ${{ secrets.PYPI_TOKEN }}

.github/workflows/release-drafter.yml

+1 -1
@@ -14,6 +14,6 @@ jobs:
       - uses: release-drafter/release-drafter@v5
         with:
           # (Optional) specify config name to use, relative to .github/. Default: release-drafter.yml
-          config-name: release-drafter-config.yml
+          config-name: release-drafter-config.yml
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
(the removed and added lines are textually identical; as rendered, an indentation-only change)

poetry.lock

+206 -2
(generated lockfile; diff not rendered)

pyproject.toml

+3 -0
@@ -18,6 +18,9 @@ redis = "^3.5.3"
 boto3 = "^1.13.24"
 tqdm = "^4.46.1"
 toml = "^0.10.1"
+seaborn = "^0.10.1"
+matplotlib = "^3.2.1"
+redistimeseries = "^0.8.0"

 [tool.poetry.dev-dependencies]
 pytest = "^4.6"
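The new redistimeseries dependency lines up with the "kicked off results exporter" item in the commit message. A minimal sketch of driving the redistimeseries-py client this dependency provides; the key name, labels, and connection details are illustrative, not taken from this commit:

    from redistimeseries.client import Client

    # Connect to a Redis instance with the RedisTimeSeries module loaded.
    rts = Client(host="localhost", port=6379)

    # Create a time series for one benchmark metric, tagged with labels
    # so it can be filtered later (e.g. TS.MRANGE ... FILTER metric=rps).
    rts.create("benchmark:rps", labels={"metric": "rps", "tool": "ftsb_redisearch"})

    # "*" lets the server assign the current timestamp to the sample.
    rts.add("benchmark:rps", "*", 12345.6)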

redisbench_admin/cli.py

+2 -1
@@ -1,8 +1,9 @@
 import argparse
 import sys
+
 import toml
-from redisbench_admin import __version__

+from redisbench_admin import __version__
 from redisbench_admin.compare.args import create_compare_arguments
 from redisbench_admin.compare.compare import compare_command_logic
 from redisbench_admin.export.args import create_export_arguments

redisbench_admin/compare/compare.py

+9 -37
@@ -1,24 +1,12 @@
-import json
 import os
 import sys

 import pandas as pd

+from redisbench_admin.utils.results import get_key_results_and_values
 from redisbench_admin.utils.utils import retrieve_local_or_remote_input_json


-def get_key_results_and_values(baseline_json, step, use_result):
-    selected_run = None
-    metrics = {}
-    if "key-results" in baseline_json and use_result in baseline_json["key-results"]:
-        for name, value in baseline_json["key-results"][step][use_result][0].items():
-            if name == "run-name":
-                selected_run = value
-            else:
-                metrics[name] = value
-    return selected_run, metrics
-
-
 def compare_command_logic(args):
     baseline_file = args.baseline_file
     comparison_file = args.comparison_file
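The relocated get_key_results_and_values (now imported from redisbench_admin.utils.results) reads result documents shaped roughly as below; the metric names are illustrative. One quirk worth flagging: the guard tests use_result against the top level of "key-results", yet the lookup indexes ["key-results"][step][use_result], so a step-nested document like this one never passes the guard as written; checking baseline_json["key-results"][step] instead looks like the intent.

    baseline_json = {
        "key-results": {
            "benchmark": {
                "median-result": [
                    {"run-name": "run-2", "overallOpsRate": 12345.6, "q50latency": 1.25}
                ]
            }
        }
    }

    # Intended result for step="benchmark", use_result="median-result"
    # (once the guard checks the step-nested level):
    #   ("run-2", {"overallOpsRate": 12345.6, "q50latency": 1.25})
    selected_run, metrics = get_key_results_and_values(baseline_json, "benchmark", "median-result")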
@@ -82,13 +70,17 @@ def compare_command_logic(args):
             if enabled_fail:
                 failing_metrics_serie = df.loc['pct_change'] <= max_negative_pct_change
                 failing_metrics = df.loc['pct_change'][failing_metrics_serie]
-                ammount_of_failing_metrics = len (failing_metrics)
+                ammount_of_failing_metrics = len(failing_metrics)
                 if ammount_of_failing_metrics > 0:
                     df_keys = df.keys()
-                    print( "There was a total of {} metrics that presented a regression above {} %".format(ammount_of_failing_metrics,max_pct_change) )
-                    for pos,failed in enumerate(failing_metrics_serie):
+                    print("There was a total of {} metrics that presented a regression above {} %".format(
+                        ammount_of_failing_metrics, max_pct_change))
+                    for pos, failed in enumerate(failing_metrics_serie):
                         if failed:
-                            print("\tMetric '{}' failed. with an percentage of change of {:.2f} %".format(df_keys[pos],df.loc['pct_change'][pos]))
+                            print("\tMetric '{}' failed. with an percentage of change of {:.2f} %".format(df_keys[pos],
+                                                                                                          df.loc[
+                                                                                                              'pct_change'][
+                                                                                                              pos]))
                     sys.exit(1)
         else:
             print("Skipping step: {} due to command line argument --steps not containing it ({})".format(step, ",".join(
@@ -116,23 +108,3 @@ def generate_comparison_dataframe_configs(benchmark_config, steps):
                 step_df_dict[step]["sorting_metric_sorting_direction_map"][metric_name] = False if metric[
                     "comparison"] == "higher-better" else True
     return step_df_dict
-
-
-def from_resultsDF_to_key_results_dict(resultsDataFrame, step, step_df_dict):
-    key_results_dict = {}
-    key_results_dict["table"] = json.loads(resultsDataFrame.to_json(orient='records'))
-    best_result = resultsDataFrame.head(n=1)
-    worst_result = resultsDataFrame.tail(n=1)
-    first_sorting_col = step_df_dict[step]["sorting_metric_names"][0]
-    first_sorting_median = resultsDataFrame[first_sorting_col].median()
-    result_index = resultsDataFrame[first_sorting_col].sub(first_sorting_median).abs().idxmin()
-    median_result = resultsDataFrame.loc[[result_index]]
-    key_results_dict["best-result"] = json.loads(best_result.to_json(orient='records'))
-    key_results_dict["median-result"] = json.loads(
-        median_result.to_json(orient='records'))
-    key_results_dict["worst-result"] = json.loads(worst_result.to_json(orient='records'))
-    key_results_dict["reliability-analysis"] = {
-        'var': json.loads(resultsDataFrame.var().to_json()),
-        'stddev': json.loads(
-            resultsDataFrame.std().to_json())}
-    return key_results_dict
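The deleted from_resultsDF_to_key_results_dict helper (presumably relocated alongside get_key_results_and_values, though only the removal is visible in this diff) summarized a sorted results DataFrame into best/median/worst entries. Its median selection, picking the row whose first sorting metric sits closest to the column median, is worth a small sketch:

    import pandas as pd

    df = pd.DataFrame({"overallOpsRate": [9800.0, 10050.0, 10200.0]})

    median_value = df["overallOpsRate"].median()                       # 10050.0
    closest_idx = df["overallOpsRate"].sub(median_value).abs().idxmin()
    median_result = df.loc[[closest_idx]]                              # one-row DataFrame
    print(median_result)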

redisbench_admin/export/args.py

+3 -0
@@ -5,4 +5,7 @@ def create_export_arguments(parser):
                         help="comma separated list of steps to be analyzed given the benchmark result files")
     parser.add_argument('--exporter', type=str, default="csv",
                         help="exporter to be used ( either csv or redistimeseries )")
+    parser.add_argument('--use-result', type=str, default="median-result",
+                        help="for each key-metric, use either worst-result, best-result, or median-result")
+    parser.add_argument('--extra-tags', type=str, default="", help='comma separated extra tags in the format of key1=value,key2=value,...')
     return parser
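A quick sketch of the two new flags in use, assuming the remaining export arguments all have defaults; the tag-splitting helper at the end is illustrative, not part of this commit:

    import argparse

    from redisbench_admin.export.args import create_export_arguments

    parser = create_export_arguments(argparse.ArgumentParser())
    args = parser.parse_args(
        ["--exporter", "redistimeseries", "--use-result", "best-result",
         "--extra-tags", "project=redisearch,os=linux"]
    )

    # Hypothetical parsing of --extra-tags into a dict of labels.
    extra_tags = dict(tag.split("=", 1) for tag in args.extra_tags.split(",") if tag)
    print(args.use_result, extra_tags)  # best-result {'project': 'redisearch', 'os': 'linux'}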
