flood Load Test Report

Contents

  1. Test Summary
  2. Tests
    1. Test: eth_call
    2. Test: eth_getBalance
    3. Test: eth_getBlockByNumber
    4. Test: eth_getCode
    5. Test: eth_getLogs
    6. Test: eth_getStorageAt
    7. Test: eth_getTransactionByHash
    8. Test: eth_getTransactionCount
    9. Test: eth_getTransactionReceipt

Report Generation

This report was generated by flood. Run flood report --help to see report generation options. The report can also be executed as a Python notebook using the .ipynb version of this file.
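
The .ipynb can be re-executed headlessly before viewing. A minimal sketch, assuming jupyter and nbconvert are installed (the filename below is a placeholder for the .ipynb version of this report, not part of the original output):

import subprocess

# Hypothetical sketch: re-execute the notebook in place so all outputs are refreshed.
# 'load_test_report.ipynb' is a placeholder filename.
subprocess.run(
    ['jupyter', 'nbconvert', '--to', 'notebook', '--execute', '--inplace', 'load_test_report.ipynb'],
    check=True,
)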

In [1]:
import IPython
import polars as pl
import toolstr
import tooltime

import flood

flood.styles = {}
In [2]:
# parameters

test_paths = {
    'eth_call': '/Users/stormslivkoff/data/load_tests/june_5_load_tests/eth_call',
    'eth_getBalance': '/Users/stormslivkoff/data/load_tests/june_5_load_tests/eth_getBalance',
    'eth_getBlockByNumber': '/Users/stormslivkoff/data/load_tests/june_5_load_tests/eth_getBlockByNumber',
    'eth_getCode': '/Users/stormslivkoff/data/load_tests/june_5_load_tests/eth_getCode',
    'eth_getLogs': '/Users/stormslivkoff/data/load_tests/june_5_load_tests/eth_getLogs',
    'eth_getStorageAt': '/Users/stormslivkoff/data/load_tests/june_5_load_tests/eth_getStorageAt',
    'eth_getTransactionByHash': '/Users/stormslivkoff/data/load_tests/june_5_load_tests/eth_getTransactionByHash',
    'eth_getTransactionCount': '/Users/stormslivkoff/data/load_tests/june_5_load_tests/eth_getTransactionCount',
    'eth_getTransactionReceipt': '/Users/stormslivkoff/data/load_tests/june_5_load_tests/eth_getTransactionReceipt',
}

metrics = ['success', 'throughput', 'p50', 'p90', 'p99']
In [3]:
# load data

test_payloads = {
    test_name: flood.load_single_run_test_payload(output_dir=test_path)
    for test_name, test_path in test_paths.items()
}

results_payloads = {
    test_name: flood.load_single_run_results_payload(output_dir=test_path)
    for test_name, test_path in test_paths.items()
}
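
The exact payload layout depends on the flood version; as a rough sketch, the keys this notebook relies on ('results', 'test', 'nodes', and the per-condition metric columns) can be inspected directly:

# Peek at the structure of one loaded payload (keys used later in this report).
example = results_payloads['eth_call']
print(list(example.keys()))                      # includes 'results', 'test', 'nodes'
print(list(example['results'].keys()))           # one entry per condition, e.g. 'node1', 'node2'
print(list(example['results']['node1'].keys()))  # per-rate metrics such as 'p50', 'throughput', 'success'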

Test Summary

In [4]:
# test list

toolstr.print_text_box('Tests')
for t, test_name in enumerate(results_payloads.keys()):
    print(str(t + 1) + '.', test_name)
┌───────┐
│ Tests │
└───────┘
1. eth_call
2. eth_getBalance
3. eth_getBlockByNumber
4. eth_getCode
5. eth_getLogs
6. eth_getStorageAt
7. eth_getTransactionByHash
8. eth_getTransactionCount
9. eth_getTransactionReceipt
In [5]:
# test durations

time_per_test = {}
time_per_condition = {}

for test_name in results_payloads:
    results = results_payloads[test_name]["results"]
    time_per_test[test_name] = 0
    for condition_name in results.keys():
        time_per_condition.setdefault(condition_name, 0)
        time = sum(results[condition_name]["actual_duration"]) + sum(
            results[condition_name]["final_wait_time"]
        )
        time_per_test[test_name] += time
        time_per_condition[condition_name] += time

toolstr.print_text_box('Total time')
toolstr.print(tooltime.timelength_to_phrase(int(sum(time_per_test.values()))))
print()

toolstr.print_text_box('Total time per test')
rows = list(time_per_test.items())
toolstr.print_table(rows, labels=['test', 'time (s)'])

toolstr.print_text_box('Total time per condition')
rows = list(time_per_condition.items())
toolstr.print_table(rows, labels=['condition', 'time (s)'])
┌────────────┐
│ Total time │
└────────────┘
46 minutes, 53 seconds

┌─────────────────────┐
│ Total time per test │
└─────────────────────┘
                       test  │  time (s)  
─────────────────────────────┼────────────
                   eth_call  │    300.23  
             eth_getBalance  │       300  
       eth_getBlockByNumber  │    300.18  
                eth_getCode  │       300  
                eth_getLogs  │    411.26  
           eth_getStorageAt  │       300  
   eth_getTransactionByHash  │    300.29  
    eth_getTransactionCount  │    301.01  
  eth_getTransactionReceipt  │     300.9  
┌──────────────────────────┐
│ Total time per condition │
└──────────────────────────┘
  condition  │  time (s)  
─────────────┼────────────
      node1  │   1,420.5  
      node2  │  1,393.36  
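
As a quick sanity check (a sketch using only the variables computed in the cell above), the per-test and per-condition subtotals should both sum to the overall total reported here:

# Both breakdowns partition the same measurements, so their sums should agree.
total = sum(time_per_test.values())
assert abs(total - sum(time_per_condition.values())) < 1e-6
print(tooltime.timelength_to_phrase(int(total)))  # e.g. '46 minutes, 53 seconds'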

Tests

Test: eth_call

In [6]:
# load test results

test_name = 'eth_call'
results_payload = results_payloads[test_name]
results = results_payload['results']
In [7]:
# show test metadata

toolstr.print_text_box(test_name + ' parameters')
flood.print_load_test_summary(results_payload['test'])
┌─────────────────────┐
│ eth_call parameters │
└─────────────────────┘
- sample rates: [1024, 2048, 4096, 8192, 16384]
- sample duration: 30 seconds
In [8]:
# show result tables

flood.print_metric_tables(results, metrics=metrics, comparison=True)
┌─────────────────┐
│ success vs load │
└─────────────────┘
  rate (rps)  │     node1  │     node2  │  node1 / node2  
──────────────┼────────────┼────────────┼─────────────────
       1,024  │  1.000000  │  1.000000  │         100.0%  
       2,048  │  1.000000  │  0.993441  │         100.7%  
       4,096  │  1.000000  │  1.000000  │         100.0%  
       8,192  │  1.000000  │  0.923796  │         108.2%  
      16,384  │  1.000000  │  0.523625  │         191.0%  

┌────────────────────┐
│ throughput vs load │
└────────────────────┘
  rate (rps)  │  node1 (rps)  │  node2 (rps)  │  node1 / node2  
──────────────┼───────────────┼───────────────┼─────────────────
       1,024  │      1,023.9  │      1,024.0  │         100.0%  
       2,048  │      2,048.0  │      2,034.5  │         100.7%  
       4,096  │      4,096.1  │      4,095.4  │         100.0%  
       8,192  │      8,191.9  │      7,544.6  │         108.6%  
      16,384  │     16,384.3  │      8,544.3  │         191.8%  

┌─────────────┐
│ p50 vs load │
└─────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       1,024  │   0.000818  │   0.003432  │          23.8%  
       2,048  │   0.000729  │   0.003214  │          22.7%  
       4,096  │   0.000646  │   0.003098  │          20.8%  
       8,192  │   0.000586  │   0.004963  │          11.8%  
      16,384  │   0.000538  │   0.064327  │           0.8%  

┌─────────────┐
│ p90 vs load │
└─────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       1,024  │   0.001192  │   0.005433  │          21.9%  
       2,048  │   0.000972  │   0.005892  │          16.5%  
       4,096  │   0.000861  │   0.006213  │          13.9%  
       8,192  │   0.000779  │   0.153912  │           0.5%  
      16,384  │   0.000726  │   0.132717  │           0.5%  

┌─────────────┐
│ p99 vs load │
└─────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       1,024  │   0.008982  │   0.007990  │         112.4%  
       2,048  │   0.001274  │   0.624675  │           0.2%  
       4,096  │   0.001108  │   0.041268  │           2.7%  
       8,192  │   0.001107  │   0.358046  │           0.3%  
      16,384  │   0.002789  │   0.307765  │           0.9%  
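
For reference, the node1 / node2 comparison columns above can be reproduced from the raw per-condition results. A minimal sketch for the p50 column, assuming only the column names shown in the complete-results tables below:

# Recompute the p50 ratio of node1 to node2 at each target rate.
node1 = pl.DataFrame(results['node1'])
node2 = pl.DataFrame(results['node2'])
for rate, p50_1, p50_2 in zip(node1['target_rate'], node1['p50'], node2['p50']):
    print(f'{rate:>6} rps: {p50_1 / p50_2:.1%}')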
In [9]:
# show result figures

colors = flood.get_nodes_plot_colors(nodes=results_payload['nodes'])
flood.plot_load_test_results(
    test_name=test_name,
    outputs=results,
    latency_yscale_log=True,
    colors=colors,
)
In [10]:
# show errors

toolstr.print_text_box('Error messages present in each test')
unique_errors = {}
for n, name in enumerate(results.keys()):
    unique_errors.setdefault(name, set())
    unique_errors[name] |= {
        error for error_list in results[name]['errors'] for error in error_list
    }
    print(name)
    for error in unique_errors[name]:
        print('-', error)
    if n != len(results) - 1:
        print()
┌─────────────────────────────────────┐
│ Error messages present in each test │
└─────────────────────────────────────┘
node1

node2
- Post "http://localhost:8545": dial tcp 0.0.0.0:0->127.0.0.1:8545: socket: too many open files
- Post "http://localhost:8545": dial tcp 0.0.0.0:0->[::1]:8545: socket: too many open files
In [11]:
# show complete results

for name in results.keys():
    toolstr.print_text_box(name + " Complete Results")
    df = pl.DataFrame(results[name])
    df = df.drop(
        "status_codes",
        "errors",
        "first_request_timestamp",
        "last_request_timestamp",
        "last_response_timestamp",
    )
    IPython.display.display(df)
┌────────────────────────┐
│ node1 Complete Results │
└────────────────────────┘
shape: (5, 15)
(target_rate, target_duration, and requests are i64; all other columns are f64; all duration and latency columns are in seconds)
 target_rate │  actual_rate │ target_duration │ actual_duration │ requests │   throughput │ success │      min │     mean │      p50 │      p90 │      p95 │      p99 │      max │ final_wait_time
─────────────┼──────────────┼─────────────────┼─────────────────┼──────────┼──────────────┼─────────┼──────────┼──────────┼──────────┼──────────┼──────────┼──────────┼──────────┼────────────────
        1024 │  1024.034649 │              30 │       29.998985 │    30720 │  1023.855585 │     1.0 │ 0.000175 │ 0.001135 │ 0.000818 │ 0.001192 │ 0.002999 │ 0.008982 │ 0.022805 │        0.005247
        2048 │  2048.070726 │              30 │       29.998964 │    61440 │  2048.019014 │     1.0 │ 0.000187 │ 0.000761 │ 0.000729 │ 0.000972 │ 0.001055 │ 0.001274 │ 0.014170 │        0.000757
        4096 │  4096.159414 │              30 │       29.999077 │   122881 │  4096.098668 │     1.0 │ 0.000156 │ 0.000667 │ 0.000646 │ 0.000861 │ 0.000934 │ 0.001108 │ 0.009465 │        0.000445
        8192 │  8192.056211 │              30 │       29.999916 │   245761 │  8191.877061 │     1.0 │ 0.000161 │ 0.000615 │ 0.000586 │ 0.000779 │ 0.000851 │ 0.001107 │ 0.015939 │        0.000656
       16384 │ 16384.537386 │              30 │       29.999077 │   491521 │ 16384.321238 │     1.0 │ 0.000143 │ 0.000611 │ 0.000538 │ 0.000726 │ 0.000826 │ 0.002789 │ 0.017278 │        0.000396
┌────────────────────────┐
│ node2 Complete Results │
└────────────────────────┘
shape: (5, 15)
(target_rate, target_duration, and requests are i64; all other columns are f64; all duration and latency columns are in seconds)
 target_rate │  actual_rate │ target_duration │ actual_duration │ requests │   throughput │  success │      min │     mean │      p50 │      p90 │      p95 │      p99 │      max │ final_wait_time
─────────────┼──────────────┼─────────────────┼─────────────────┼──────────┼──────────────┼──────────┼──────────┼──────────┼──────────┼──────────┼──────────┼──────────┼──────────┼────────────────
        1024 │  1024.037603 │              30 │       29.999875 │    30721 │  1023.960297 │      1.0 │ 0.000389 │ 0.003606 │ 0.003432 │ 0.005433 │ 0.006350 │ 0.007990 │ 0.041904 │        0.002265
        2048 │  2048.067022 │              30 │       29.999507 │    61441 │  2034.508002 │ 0.993441 │ 0.000041 │ 0.018045 │ 0.003214 │ 0.005892 │ 0.007783 │ 0.624675 │ 1.141167 │        0.001850
        4096 │  4096.153526 │              30 │       29.999120 │   122881 │  4095.432531 │      1.0 │ 0.000345 │ 0.004692 │ 0.003098 │ 0.006213 │ 0.008207 │ 0.041268 │ 0.330131 │        0.005281
        8192 │  8192.053227 │              30 │       29.999927 │   245761 │  7544.572886 │ 0.923796 │ 0.000034 │ 0.049605 │ 0.004963 │ 0.153912 │ 0.191897 │ 0.358046 │ 1.791245 │        0.092300
       16384 │  16384.38957 │              30 │       29.999409 │   491522 │  8544.260016 │ 0.523625 │ 0.000030 │ 0.061987 │ 0.064327 │ 0.132717 │ 0.154209 │ 0.307765 │ 2.594034 │        0.122919

Test: eth_getBalance

In [12]:
# load test results

test_name = 'eth_getBalance'
results_payload = results_payloads[test_name]
results = results_payload['results']
In [13]:
# show test metadata

toolstr.print_text_box(test_name + ' parameters')
flood.print_load_test_summary(results_payload['test'])
┌───────────────────────────┐
│ eth_getBalance parameters │
└───────────────────────────┘
- sample rates: [1024, 2048, 4096, 8192, 16384]
- sample duration: 30 seconds
In [14]:
# show result tables

flood.print_metric_tables(results, metrics=metrics, comparison=True)
┌─────────────────┐
│ success vs load │
└─────────────────┘
  rate (rps)  │     node1  │     node2  │  node1 / node2  
──────────────┼────────────┼────────────┼─────────────────
       1,024  │  1.000000  │  1.000000  │         100.0%  
       2,048  │  1.000000  │  1.000000  │         100.0%  
       4,096  │  1.000000  │  1.000000  │         100.0%  
       8,192  │  1.000000  │  1.000000  │         100.0%  
      16,384  │  1.000000  │  0.969753  │         103.1%  

┌────────────────────┐
│ throughput vs load │
└────────────────────┘
  rate (rps)  │  node1 (rps)  │  node2 (rps)  │  node1 / node2  
──────────────┼───────────────┼───────────────┼─────────────────
       1,024  │      1,023.7  │      1,024.0  │         100.0%  
       2,048  │      2,048.0  │      2,048.0  │         100.0%  
       4,096  │      4,096.0  │      4,096.1  │         100.0%  
       8,192  │      8,192.3  │      8,192.2  │         100.0%  
      16,384  │     16,384.2  │     15,888.2  │         103.1%  

┌─────────────┐
│ p50 vs load │
└─────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       1,024  │   0.000479  │   0.000730  │          65.6%  
       2,048  │   0.000413  │   0.000667  │          61.9%  
       4,096  │   0.000353  │   0.000607  │          58.1%  
       8,192  │   0.000315  │   0.000559  │          56.3%  
      16,384  │   0.000301  │   0.000541  │          55.6%  

┌─────────────┐
│ p90 vs load │
└─────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       1,024  │   0.003601  │   0.000878  │         410.3%  
       2,048  │   0.000750  │   0.000809  │          92.7%  
       4,096  │   0.000481  │   0.000745  │          64.6%  
       8,192  │   0.000424  │   0.000688  │          61.6%  
      16,384  │   0.000401  │   0.000696  │          57.6%  

┌─────────────┐
│ p99 vs load │
└─────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       1,024  │   0.010349  │   0.001086  │         952.9%  
       2,048  │   0.009045  │   0.000978  │         925.0%  
       4,096  │   0.004626  │   0.000943  │         490.5%  
       8,192  │   0.000639  │   0.001196  │          53.5%  
      16,384  │   0.001177  │   0.005867  │          20.1%  
In [15]:
# show result figures

colors = flood.get_nodes_plot_colors(nodes=results_payload['nodes'])
flood.plot_load_test_results(
    test_name=test_name,
    outputs=results,
    latency_yscale_log=True,
    colors=colors,
)