Commit 34758e3

cpu status properly summarizing even on high client overhead (#317)
1 parent f1f1dd0 commit 34758e3

File tree

5 files changed: +106 −23 lines changed


poetry.lock

+65-8
Some generated files are not rendered by default; the poetry.lock diff is omitted here.

pyproject.toml

+2-1
@@ -14,7 +14,7 @@ python = "^3.6.1"
 humanize = "^2.4.0"
 requests = "^2.23.0"
 py_cpuinfo = "^5.0.0"
-redis = "^4.1.4"
+redis = { git = "https://github.com/redis/redis-py.git", tag = "v4.2.0rc3" }
 boto3 = "^1.13.24"
 tqdm = "^4.46.1"
 toml = "^0.10.1"
@@ -43,6 +43,7 @@ psutil = "^5.6.6"
 scipy = "^1.3.3"
 scikit-learn = "^0.22.2"
 Jinja2 = "^3.0.3"
+watchdog = "^2.1.6"
 
 [tool.poetry.dev-dependencies]
 pytest = "^4.6"

redisbench_admin/run/metrics.py

+24-13
@@ -132,32 +132,43 @@ def collect_redis_metrics(
 
 
 def from_info_to_overall_shard_cpu(benchmark_cpu_stats):
+    import numpy as np
+
     total_avg_cpu_pct = 0.0
     res = {}
     for shard_n, cpu_stats_arr in benchmark_cpu_stats.items():
         avg_cpu_pct = None
+        shards_cpu_arr = []
         # we need at least 2 elements to compute the cpu usage
         if len(cpu_stats_arr) >= 2:
-            stats_start_pos = cpu_stats_arr[0]
-            stats_end_pos = cpu_stats_arr[len(cpu_stats_arr) - 1]
-            if (
-                "server_time_usec" in stats_end_pos
-                and "server_time_usec" in stats_start_pos
-            ):
-                start_ts_micros = stats_start_pos["server_time_usec"]
-                end_ts_micros = stats_end_pos["server_time_usec"]
-                start_total_cpu = get_total_cpu(stats_start_pos)
-                end_total_cpu = get_total_cpu(stats_end_pos)
-                total_secs = (end_ts_micros - start_ts_micros) / 1000000
-                total_cpu_usage = end_total_cpu - start_total_cpu
-                avg_cpu_pct = 100.0 * (total_cpu_usage / total_secs)
+            for start_pos in range(0, len(cpu_stats_arr) - 2):
+                avg_cpu_pct = get_avg_cpu_pct(
+                    avg_cpu_pct, cpu_stats_arr[start_pos], cpu_stats_arr[start_pos + 1]
+                )
+                if avg_cpu_pct is not None:
+                    shards_cpu_arr.append(avg_cpu_pct)
+
+            avg_cpu_pct = np.percentile(shards_cpu_arr, 75)
 
         res[shard_n] = avg_cpu_pct
         if avg_cpu_pct is not None:
             total_avg_cpu_pct += avg_cpu_pct
     return total_avg_cpu_pct, res
 
 
+def get_avg_cpu_pct(avg_cpu_pct, stats_start_pos, stats_end_pos):
+    avg_cpu_pct = None
+    if "server_time_usec" in stats_end_pos and "server_time_usec" in stats_start_pos:
+        start_ts_micros = stats_start_pos["server_time_usec"]
+        end_ts_micros = stats_end_pos["server_time_usec"]
+        start_total_cpu = get_total_cpu(stats_start_pos)
+        end_total_cpu = get_total_cpu(stats_end_pos)
+        total_secs = (end_ts_micros - start_ts_micros) / 1000000
+        total_cpu_usage = end_total_cpu - start_total_cpu
+        avg_cpu_pct = 100.0 * (total_cpu_usage / total_secs)
+    return avg_cpu_pct
+
+
 def get_total_cpu(info_data):
     total_cpu = 0.0
     total_cpu = total_cpu + info_data["used_cpu_sys"]
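
The change above replaces a single start-to-end CPU delta with one sample per pair of consecutive INFO snapshots, summarized via the 75th percentile, so a brief idle or setup-heavy window no longer dominates the reported figure (the failure mode the commit title points at with high client overhead). A minimal, self-contained sketch of that idea follows; it is not the module's exact code, and it assumes total CPU is used_cpu_sys + used_cpu_user from Redis INFO (the real get_total_cpu may differ):

import numpy as np

def total_cpu(snapshot):
    # assumption for this sketch: total CPU seconds = sys + user
    return snapshot["used_cpu_sys"] + snapshot["used_cpu_user"]

def interval_cpu_pct(start, end):
    # CPU% over one sampling interval, from two consecutive INFO snapshots
    secs = (end["server_time_usec"] - start["server_time_usec"]) / 1_000_000
    return 100.0 * (total_cpu(end) - total_cpu(start)) / secs

# hypothetical snapshots: ~75% sustained load, then a near-idle tail
snapshots = [
    {"server_time_usec": 0,         "used_cpu_sys": 0.0,   "used_cpu_user": 0.0},
    {"server_time_usec": 1_000_000, "used_cpu_sys": 0.75,  "used_cpu_user": 0.0},
    {"server_time_usec": 2_000_000, "used_cpu_sys": 1.5,   "used_cpu_user": 0.0},
    {"server_time_usec": 3_000_000, "used_cpu_sys": 1.625, "used_cpu_user": 0.0},
]

samples = [interval_cpu_pct(a, b) for a, b in zip(snapshots, snapshots[1:])]
print(samples)                     # [75.0, 75.0, 12.5]
print(np.percentile(samples, 75))  # 75.0

A single whole-run delta over the same data would report roughly 54%, understating the sustained load; the per-interval percentile keeps the summary close to what the shard actually did while busy.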

redisbench_admin/run_local/run_local.py

+5
@@ -306,6 +306,11 @@ def run_local_command_logic(args, project_name, project_version):
                     total_shards_cpu_usage
                 )
             )
+            logging.info(
+                "CPU MAP: {}".format(
+                    json.dumps(cpu_usage_map, indent=2)
+                )
+            )
             benchmark_duration_seconds = (
                 calculate_client_tool_duration_and_check(
                     benchmark_end_time, benchmark_start_time
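
The added log line pretty-prints the per-shard CPU map next to the existing total. A small illustrative example of the same call pattern (the map contents here are hypothetical):

import json
import logging

logging.basicConfig(level=logging.INFO)
cpu_usage_map = {"shard-1": 73.4, "shard-2": 70.9}  # hypothetical per-shard CPU%
logging.info("CPU MAP: {}".format(json.dumps(cpu_usage_map, indent=2)))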

redisbench_admin/utils/local.py

+10-1
@@ -28,7 +28,16 @@ def check_dataset_local_requirements(
     full_path = None
     tmp_path = None
     if dbconfig_keyname in benchmark_config:
-        for k in benchmark_config[dbconfig_keyname]:
+        entry_type = type(benchmark_config[dbconfig_keyname])
+        if entry_type == list:
+            for k in benchmark_config[dbconfig_keyname]:
+                if "dataset" in k:
+                    dataset = k["dataset"]
+                    full_path = k["dataset"]
+                if "dataset_name" in k:
+                    dataset_name = k["dataset_name"]
+        if entry_type == dict:
+            k = benchmark_config[dbconfig_keyname]
         if "dataset" in k:
             dataset = k["dataset"]
             full_path = k["dataset"]
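
With this change the dbconfig section may be either a list of entries or a single mapping. A compact sketch of both accepted shapes, assuming the section key is "dbconfig" (dbconfig_keyname's actual default is not shown in this diff) and using hypothetical dataset values; resolve_dataset is an illustrative helper, not the module's function:

def resolve_dataset(benchmark_config, dbconfig_keyname="dbconfig"):
    # mirrors the updated logic: accept a list of entries or a single mapping
    dataset = dataset_name = full_path = None
    entry = benchmark_config.get(dbconfig_keyname)
    entries = entry if isinstance(entry, list) else [entry] if isinstance(entry, dict) else []
    for k in entries:
        if "dataset" in k:
            dataset = full_path = k["dataset"]
        if "dataset_name" in k:
            dataset_name = k["dataset_name"]
    return dataset, dataset_name, full_path

list_form = {"dbconfig": [{"dataset": "https://example.com/dump.rdb"},
                          {"dataset_name": "my-dataset"}]}
dict_form = {"dbconfig": {"dataset": "https://example.com/dump.rdb",
                          "dataset_name": "my-dataset"}}

print(resolve_dataset(list_form))  # ('https://example.com/dump.rdb', 'my-dataset', 'https://example.com/dump.rdb')
print(resolve_dataset(dict_form))  # same result for the mapping form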
