flood Load Test Report

This report was generated by flood. See the flood report --help command for report generation options. This report can be executed as a Python notebook using the .ipynb version of this file.
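For reference, the report options mentioned above can be listed without leaving the notebook. A minimal sketch using IPython's shell escape (this assumes the flood CLI is installed on the notebook's PATH):

# list report generation options (IPython shell escape)
!flood report --help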
import IPython
import polars as pl
import toolstr
import tooltime
import flood
flood.styles = {}
# parameters
test_paths = {
'eth_call': '/Users/stormslivkoff/data/load_tests/june_5_load_tests/eth_call',
'eth_getBalance': '/Users/stormslivkoff/data/load_tests/june_5_load_tests/eth_getBalance',
'eth_getBlockByNumber': '/Users/stormslivkoff/data/load_tests/june_5_load_tests/eth_getBlockByNumber',
'eth_getCode': '/Users/stormslivkoff/data/load_tests/june_5_load_tests/eth_getCode',
'eth_getLogs': '/Users/stormslivkoff/data/load_tests/june_5_load_tests/eth_getLogs',
'eth_getStorageAt': '/Users/stormslivkoff/data/load_tests/june_5_load_tests/eth_getStorageAt',
'eth_getTransactionByHash': '/Users/stormslivkoff/data/load_tests/june_5_load_tests/eth_getTransactionByHash',
'eth_getTransactionCount': '/Users/stormslivkoff/data/load_tests/june_5_load_tests/eth_getTransactionCount',
'eth_getTransactionReceipt': '/Users/stormslivkoff/data/load_tests/june_5_load_tests/eth_getTransactionReceipt',
}
metrics = ['success', 'throughput', 'p50', 'p90', 'p99']
# load data
test_payloads = {
test_name: flood.load_single_run_test_payload(output_dir=test_path)
for test_name, test_path in test_paths.items()
}
results_payloads = {
test_name: flood.load_single_run_results_payload(output_dir=test_path)
for test_name, test_path in test_paths.items()
}
# test list
toolstr.print_text_box('Tests')
for t, test_name in enumerate(results_payloads.keys()):
    print(str(t + 1) + '.', test_name)
# test durations
time_per_test = {}
time_per_condition = {}
for test_name in results_payloads:
    results = results_payloads[test_name]["results"]
    time_per_test[test_name] = 0
    for condition_name in results.keys():
        time_per_condition.setdefault(condition_name, 0)
        # condition time = measured test durations plus final wait times
        time = sum(results[condition_name]["actual_duration"]) + sum(
            results[condition_name]["final_wait_time"]
        )
        time_per_test[test_name] += time
        time_per_condition[condition_name] += time
toolstr.print_text_box('Total time')
toolstr.print(tooltime.timelength_to_phrase(int(sum(time_per_test.values()))))
print()
toolstr.print_text_box('Total time per test')
rows = list(time_per_test.items())
toolstr.print_table(rows, labels=['test', 'time (s)'])
toolstr.print_text_box('Total time per condition')
rows = list(time_per_condition.items())
toolstr.print_table(rows, labels=['condition', 'time (s)'])
# load test results
test_name = 'eth_call'
results_payload = results_payloads[test_name]
results = results_payload['results']
# show test metadata
toolstr.print_text_box(test_name + ' parameters')
flood.print_load_test_summary(results_payload['test'])
# show result tables
flood.print_metric_tables(results, metrics=metrics, comparison=True)
# show result figures
colors = flood.get_nodes_plot_colors(nodes=results_payload['nodes'])
flood.plot_load_test_results(
test_name=test_name,
outputs=results,
latency_yscale_log=True,
colors=colors,
)
# show errors
toolstr.print_text_box('Error messages present in each test')
unique_errors = {}
for n, name in enumerate(results.keys()):
    unique_errors.setdefault(name, set())
    unique_errors[name] |= {
        error for error_list in results[name]['errors'] for error in error_list
    }
    print(name)
    for error in unique_errors[name]:
        print('-', error)
    if n != len(results) - 1:
        print()
# show complete results
for name in results.keys():
    toolstr.print_text_box(name + " Complete Results")
    df = pl.DataFrame(results[name])
    # drop detailed columns before displaying the results table
    df = df.drop(
        "status_codes",
        "errors",
        "first_request_timestamp",
        "last_request_timestamp",
        "last_response_timestamp",
    )
    IPython.display.display(df)
# load test results
test_name = 'eth_getBalance'
results_payload = results_payloads[test_name]
results = results_payload['results']
# show test metadata
toolstr.print_text_box(test_name + ' parameters')
flood.print_load_test_summary(results_payload['test'])
# show result tables
flood.print_metric_tables(results, metrics=metrics, comparison=True)
# show result figures
colors = flood.get_nodes_plot_colors(nodes=results_payload['nodes'])
flood.plot_load_test_results(
test_name=test_name,
outputs=results,
latency_yscale_log=True,
colors=colors,
)
# show errors
toolstr.print_text_box('Error messages present in each test')
unique_errors = {}
for n, name in enumerate(results.keys()):
    unique_errors.setdefault(name, set())
    unique_errors[name] |= {
        error for error_list in results[name]['errors'] for error in error_list
    }
    print(name)
    for error in unique_errors[name]:
        print('-', error)
    if n != len(results) - 1:
        print()
# show complete results
for name in results.keys():
    toolstr.print_text_box(name + " Complete Results")
    df = pl.DataFrame(results[name])
    # drop detailed columns before displaying the results table
    df = df.drop(
        "status_codes",
        "errors",
        "first_request_timestamp",
        "last_request_timestamp",
        "last_response_timestamp",
    )
    IPython.display.display(df)
# load test results
test_name = 'eth_getBlockByNumber'
results_payload = results_payloads[test_name]
results = results_payload['results']
# show test metadata
toolstr.print_text_box(test_name + ' parameters')
flood.print_load_test_summary(results_payload['test'])
# show result tables
flood.print_metric_tables(results, metrics=metrics, comparison=True)
# show result figures
colors = flood.get_nodes_plot_colors(nodes=results_payload['nodes'])
flood.plot_load_test_results(
test_name=test_name,
outputs=results,
latency_yscale_log=True,
colors=colors,
)
# show errors
toolstr.print_text_box('Error messages present in each test')
unique_errors = {}
for n, name in enumerate(results.keys()):
    unique_errors.setdefault(name, set())
    unique_errors[name] |= {
        error for error_list in results[name]['errors'] for error in error_list
    }
    print(name)
    for error in unique_errors[name]:
        print('-', error)
    if n != len(results) - 1:
        print()
# show complete results
for name in results.keys():
    toolstr.print_text_box(name + " Complete Results")
    df = pl.DataFrame(results[name])
    # drop detailed columns before displaying the results table
    df = df.drop(
        "status_codes",
        "errors",
        "first_request_timestamp",
        "last_request_timestamp",
        "last_response_timestamp",
    )
    IPython.display.display(df)
# load test results
test_name = 'eth_getCode'
results_payload = results_payloads[test_name]
results = results_payload['results']
# show test metadata
toolstr.print_text_box(test_name + ' parameters')
flood.print_load_test_summary(results_payload['test'])
# show result tables
flood.print_metric_tables(results, metrics=metrics, comparison=True)
# show result figures
colors = flood.get_nodes_plot_colors(nodes=results_payload['nodes'])
flood.plot_load_test_results(
test_name=test_name,
outputs=results,
latency_yscale_log=True,
colors=colors,
)
# show errors
toolstr.print_text_box('Error messages present in each test')
unique_errors = {}
for n, name in enumerate(results.keys()):
    unique_errors.setdefault(name, set())
    unique_errors[name] |= {
        error for error_list in results[name]['errors'] for error in error_list
    }
    print(name)
    for error in unique_errors[name]:
        print('-', error)
    if n != len(results) - 1:
        print()
# show complete results
for name in results.keys():
    toolstr.print_text_box(name + " Complete Results")
    df = pl.DataFrame(results[name])
    # drop detailed columns before displaying the results table
    df = df.drop(
        "status_codes",
        "errors",
        "first_request_timestamp",
        "last_request_timestamp",
        "last_response_timestamp",
    )
    IPython.display.display(df)
# load test results
test_name = 'eth_getLogs'
results_payload = results_payloads[test_name]
results = results_payload['results']
# show test metadata
toolstr.print_text_box(test_name + ' parameters')
flood.print_load_test_summary(results_payload['test'])
# show result tables
flood.print_metric_tables(results, metrics=metrics, comparison=True)
# show result figures
colors = flood.get_nodes_plot_colors(nodes=results_payload['nodes'])
flood.plot_load_test_results(
test_name=test_name,
outputs=results,
latency_yscale_log=True,
colors=colors,
)
# show errors
toolstr.print_text_box('Error messages present in each test')
unique_errors = {}
for n, name in enumerate(results.keys()):
    unique_errors.setdefault(name, set())
    unique_errors[name] |= {
        error for error_list in results[name]['errors'] for error in error_list
    }
    print(name)
    for error in unique_errors[name]:
        print('-', error)
    if n != len(results) - 1:
        print()
# show complete results
for name in results.keys():
    toolstr.print_text_box(name + " Complete Results")
    df = pl.DataFrame(results[name])
    # drop detailed columns before displaying the results table
    df = df.drop(
        "status_codes",
        "errors",
        "first_request_timestamp",
        "last_request_timestamp",
        "last_response_timestamp",
    )
    IPython.display.display(df)
# load test results
test_name = 'eth_getStorageAt'
results_payload = results_payloads[test_name]
results = results_payload['results']
# show test metadata
toolstr.print_text_box(test_name + ' parameters')
flood.print_load_test_summary(results_payload['test'])
# show result tables
flood.print_metric_tables(results, metrics=metrics, comparison=True)
# show result figures
colors = flood.get_nodes_plot_colors(nodes=results_payload['nodes'])
flood.plot_load_test_results(
test_name=test_name,
outputs=results,
latency_yscale_log=True,
colors=colors,
)
# show errors
toolstr.print_text_box('Error messages present in each test')
unique_errors = {}
for n, name in enumerate(results.keys()):
    unique_errors.setdefault(name, set())
    unique_errors[name] |= {
        error for error_list in results[name]['errors'] for error in error_list
    }
    print(name)
    for error in unique_errors[name]:
        print('-', error)
    if n != len(results) - 1:
        print()
# show complete results
for name in results.keys():
    toolstr.print_text_box(name + " Complete Results")
    df = pl.DataFrame(results[name])
    # drop detailed columns before displaying the results table
    df = df.drop(
        "status_codes",
        "errors",
        "first_request_timestamp",
        "last_request_timestamp",
        "last_response_timestamp",
    )
    IPython.display.display(df)
# load test results
test_name = 'eth_getTransactionByHash'
results_payload = results_payloads[test_name]
results = results_payload['results']
# show test metadata
toolstr.print_text_box(test_name + ' parameters')
flood.print_load_test_summary(results_payload['test'])
# show result tables
flood.print_metric_tables(results, metrics=metrics, comparison=True)
# show result figures
colors = flood.get_nodes_plot_colors(nodes=results_payload['nodes'])
flood.plot_load_test_results(
test_name=test_name,
outputs=results,
latency_yscale_log=True,
colors=colors,
)
# show errors
toolstr.print_text_box('Error messages present in each test')
unique_errors = {}
for n, name in enumerate(results.keys()):
    unique_errors.setdefault(name, set())
    unique_errors[name] |= {
        error for error_list in results[name]['errors'] for error in error_list
    }
    print(name)
    for error in unique_errors[name]:
        print('-', error)
    if n != len(results) - 1:
        print()
# show complete results
for name in results.keys():
    toolstr.print_text_box(name + " Complete Results")
    df = pl.DataFrame(results[name])
    # drop detailed columns before displaying the results table
    df = df.drop(
        "status_codes",
        "errors",
        "first_request_timestamp",
        "last_request_timestamp",
        "last_response_timestamp",
    )
    IPython.display.display(df)
# load test results
test_name = 'eth_getTransactionCount'
results_payload = results_payloads[test_name]
results = results_payload['results']
# show test metadata
toolstr.print_text_box(test_name + ' parameters')
flood.print_load_test_summary(results_payload['test'])
# show result tables
flood.print_metric_tables(results, metrics=metrics, comparison=True)
# show result figures
colors = flood.get_nodes_plot_colors(nodes=results_payload['nodes'])
flood.plot_load_test_results(
test_name=test_name,
outputs=results,
latency_yscale_log=True,
colors=colors,
)
# show errors
toolstr.print_text_box('Error messages present in each test')
unique_errors = {}
for n, name in enumerate(results.keys()):
    unique_errors.setdefault(name, set())
    unique_errors[name] |= {
        error for error_list in results[name]['errors'] for error in error_list
    }
    print(name)
    for error in unique_errors[name]:
        print('-', error)
    if n != len(results) - 1:
        print()
# show complete results
for name in results.keys():
    toolstr.print_text_box(name + " Complete Results")
    df = pl.DataFrame(results[name])
    # drop detailed columns before displaying the results table
    df = df.drop(
        "status_codes",
        "errors",
        "first_request_timestamp",
        "last_request_timestamp",
        "last_response_timestamp",
    )
    IPython.display.display(df)
# load test results
test_name = 'eth_getTransactionReceipt'
results_payload = results_payloads[test_name]
results = results_payload['results']
# show test metadata
toolstr.print_text_box(test_name + ' parameters')
flood.print_load_test_summary(results_payload['test'])
# show result tables
flood.print_metric_tables(results, metrics=metrics, comparison=True)
# show result figures
colors = flood.get_nodes_plot_colors(nodes=results_payload['nodes'])
flood.plot_load_test_results(
test_name=test_name,
outputs=results,
latency_yscale_log=True,
colors=colors,
)
# show errors
toolstr.print_text_box('Error messages present in each test')
unique_errors = {}
for n, name in enumerate(results.keys()):
    unique_errors.setdefault(name, set())
    unique_errors[name] |= {
        error for error_list in results[name]['errors'] for error in error_list
    }
    print(name)
    for error in unique_errors[name]:
        print('-', error)
    if n != len(results) - 1:
        print()
# show complete results
for name in results.keys():
    toolstr.print_text_box(name + " Complete Results")
    df = pl.DataFrame(results[name])
    # drop detailed columns before displaying the results table
    df = df.drop(
        "status_codes",
        "errors",
        "first_request_timestamp",
        "last_request_timestamp",
        "last_response_timestamp",
    )
    IPython.display.display(df)