flood Load Test Report

Contents

  1. Test Summary
  2. Tests
    1. Test: eth_call
    2. Test: eth_getBalance
    3. Test: eth_getBlockByNumber
    4. Test: eth_getCode
    5. Test: eth_getLogs
    6. Test: eth_getStorageAt
    7. Test: eth_getTransactionByHash
    8. Test: eth_getTransactionCount
    9. Test: eth_getTransactionReceipt

Report Generation

This report was generated by flood. Run flood report --help to see the available report generation options. This report can also be executed as a Python notebook using the .ipynb version of this file.
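
For example, the .ipynb version can be re-run end to end with a notebook executor. The sketch below uses papermill, which is not part of flood and is shown only as one option; the file names are placeholders for the actual notebook paths:

import papermill as pm

# execute the report notebook and write the executed copy to a new file
# (input/output paths are hypothetical; substitute the real .ipynb locations)
pm.execute_notebook(
    'flood_report.ipynb',
    'flood_report_executed.ipynb',
)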

In [1]:
import IPython
import polars as pl
import toolstr
import tooltime

import flood

flood.styles = {}
In [2]:
# parameters

test_paths = {
    'eth_call': '/Users/stormslivkoff/data/load_tests/june_5_load_tests/eth_call',
    'eth_getBalance': '/Users/stormslivkoff/data/load_tests/june_5_load_tests/eth_getBalance',
    'eth_getBlockByNumber': '/Users/stormslivkoff/data/load_tests/june_5_load_tests/eth_getBlockByNumber',
    'eth_getCode': '/Users/stormslivkoff/data/load_tests/june_5_load_tests/eth_getCode',
    'eth_getLogs': '/Users/stormslivkoff/data/load_tests/june_5_load_tests/eth_getLogs',
    'eth_getStorageAt': '/Users/stormslivkoff/data/load_tests/june_5_load_tests/eth_getStorageAt',
    'eth_getTransactionByHash': '/Users/stormslivkoff/data/load_tests/june_5_load_tests/eth_getTransactionByHash',
    'eth_getTransactionCount': '/Users/stormslivkoff/data/load_tests/june_5_load_tests/eth_getTransactionCount',
    'eth_getTransactionReceipt': '/Users/stormslivkoff/data/load_tests/june_5_load_tests/eth_getTransactionReceipt',
}

metrics = ['success', 'throughput', 'p50', 'p90', 'p99']
In [3]:
# load data

test_payloads = {
    test_name: flood.load_single_run_test_payload(output_dir=test_path)
    for test_name, test_path in test_paths.items()
}

results_payloads = {
    test_name: flood.load_single_run_results_payload(output_dir=test_path)
    for test_name, test_path in test_paths.items()
}

Test Summary

In [4]:
# test list

toolstr.print_text_box('Tests')
for t, test_name in enumerate(results_payloads.keys()):
    print(str(t + 1) + '.', test_name)
┌───────┐
│ Tests │
└───────┘
1. eth_call
2. eth_getBalance
3. eth_getBlockByNumber
4. eth_getCode
5. eth_getLogs
6. eth_getStorageAt
7. eth_getTransactionByHash
8. eth_getTransactionCount
9. eth_getTransactionReceipt
In [5]:
# test durations

time_per_test = {}
time_per_condition = {}

for test_name in results_payloads:
    results = results_payloads[test_name]["results"]
    time_per_test[test_name] = 0
    for condition_name in results.keys():
        time_per_condition.setdefault(condition_name, 0)
        time = sum(results[condition_name]["actual_duration"]) + sum(
            results[condition_name]["final_wait_time"]
        )
        time_per_test[test_name] += time
        time_per_condition[condition_name] += time

toolstr.print_text_box('Total time')
toolstr.print(tooltime.timelength_to_phrase(int(sum(time_per_test.values()))))
print()

toolstr.print_text_box('Total time per test')
rows = list(time_per_test.items())
toolstr.print_table(rows, labels=['test', 'time (s)'])

toolstr.print_text_box('Total time per condition')
rows = list(time_per_condition.items())
toolstr.print_table(rows, labels=['condition', 'time (s)'])
┌────────────┐
│ Total time │
└────────────┘
46 minutes, 53 seconds

┌─────────────────────┐
│ Total time per test │
└─────────────────────┘
                       test  │  time (s)  
─────────────────────────────┼────────────
                   eth_call  │    300.23  
             eth_getBalance  │       300  
       eth_getBlockByNumber  │    300.18  
                eth_getCode  │       300  
                eth_getLogs  │    411.26  
           eth_getStorageAt  │       300  
   eth_getTransactionByHash  │    300.29  
    eth_getTransactionCount  │    301.01  
  eth_getTransactionReceipt  │     300.9  
┌──────────────────────────┐
│ Total time per condition │
└──────────────────────────┘
  condition  │  time (s)  
─────────────┼────────────
      node1  │   1,420.5  
      node2  │  1,393.36  
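
As a quick cross-check, the per-test times above sum to roughly the same total as the per-condition times (1,420.5 + 1,393.36 ≈ 2,813.9 s) and reproduce the reported overall total (tooltime is already imported at the top of this notebook):

per_test_seconds = [300.23, 300, 300.18, 300, 411.26, 300, 300.29, 301.01, 300.9]

# ~2813.87 s in total
total_seconds = sum(per_test_seconds)
print(tooltime.timelength_to_phrase(int(total_seconds)))  # 46 minutes, 53 seconds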

Tests

Test: eth_call

In [6]:
# load test results

test_name = 'eth_call'
results_payload = results_payloads[test_name]
results = results_payload['results']
In [7]:
# show test metadata

toolstr.print_text_box(test_name + ' parameters')
flood.print_load_test_summary(results_payload['test'])
┌─────────────────────┐
│ eth_call parameters │
└─────────────────────┘
- sample rates: [1024, 2048, 4096, 8192, 16384]
- sample duration: 30
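
Each sample rate is held for the full sample duration, so the number of requests per sample is roughly rate × duration; e.g. 1,024 rps × 30 s ≈ 30,720 requests, which matches the requests column in the complete-results tables below. A small sketch of that arithmetic:

sample_rates = [1024, 2048, 4096, 8192, 16384]
sample_duration = 30  # seconds

# expected request count per sample = rate * duration
for rate in sample_rates:
    print(f'{rate:>6} rps -> ~{rate * sample_duration:,} requests')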
In [8]:
# show result tables

flood.print_metric_tables(results, metrics=metrics, comparison=True)
┌─────────────────┐
│ success vs load │
└─────────────────┘
  rate (rps)  │     node1  │     node2  │  node1 / node2  
──────────────┼────────────┼────────────┼─────────────────
       1,024  │  1.000000  │  1.000000  │         100.0%  
       2,048  │  1.000000  │  0.993441  │         100.7%  
       4,096  │  1.000000  │  1.000000  │         100.0%  
       8,192  │  1.000000  │  0.923796  │         108.2%  
      16,384  │  1.000000  │  0.523625  │         191.0%  

┌────────────────────┐
│ throughput vs load │
└────────────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       1,024  │    1,023.9  │    1,024.0  │         100.0%  
       2,048  │    2,048.0  │    2,034.5  │         100.7%  
       4,096  │    4,096.1  │    4,095.4  │         100.0%  
       8,192  │    8,191.9  │    7,544.6  │         108.6%  
      16,384  │   16,384.3  │    8,544.3  │         191.8%  

┌─────────────┐
│ p50 vs load │
└─────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       1,024  │   0.000818  │   0.003432  │          23.8%  
       2,048  │   0.000729  │   0.003214  │          22.7%  
       4,096  │   0.000646  │   0.003098  │          20.8%  
       8,192  │   0.000586  │   0.004963  │          11.8%  
      16,384  │   0.000538  │   0.064327  │           0.8%  

┌─────────────┐
│ p90 vs load │
└─────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       1,024  │   0.001192  │   0.005433  │          21.9%  
       2,048  │   0.000972  │   0.005892  │          16.5%  
       4,096  │   0.000861  │   0.006213  │          13.9%  
       8,192  │   0.000779  │   0.153912  │           0.5%  
      16,384  │   0.000726  │   0.132717  │           0.5%  

┌─────────────┐
│ p99 vs load │
└─────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       1,024  │   0.008982  │   0.007990  │         112.4%  
       2,048  │   0.001274  │   0.624675  │           0.2%  
       4,096  │   0.001108  │   0.041268  │           2.7%  
       8,192  │   0.001107  │   0.358046  │           0.3%  
      16,384  │   0.002789  │   0.307765  │           0.9%  
In [9]:
# show result figures

colors = flood.get_nodes_plot_colors(nodes=results_payload['nodes'])
flood.plot_load_test_results(
    test_name=test_name,
    outputs=results,
    latency_yscale_log=True,
    colors=colors,
)
In [10]:
# show errors

toolstr.print_text_box('Error messages present in each test')
unique_errors = {}
for n, name in enumerate(results.keys()):
    unique_errors.setdefault(name, set())
    unique_errors[name] |= {
        error for error_list in results[name]['errors'] for error in error_list
    }
    print(name)
    for error in unique_errors[name]:
        print('-', error)
    if n != len(results) - 1:
        print()
┌─────────────────────────────────────┐
│ Error messages present in each test │
└─────────────────────────────────────┘
node1

node2
- Post "http://localhost:8545": dial tcp 0.0.0.0:0->127.0.0.1:8545: socket: too many open files
- Post "http://localhost:8545": dial tcp 0.0.0.0:0->[::1]:8545: socket: too many open files
In [11]:
# show complete results

for name in results.keys():
    toolstr.print_text_box(name + " Complete Results")
    df = pl.DataFrame(results[name])
    df = df.drop(
        "status_codes",
        "errors",
        "first_request_timestamp",
        "last_request_timestamp",
        "last_response_timestamp",
    )
    IPython.display.display(df)
┌────────────────────────┐
│ node1 Complete Results │
└────────────────────────┘
shape: (5, 15)
target_rate │ actual_rate │ target_duration │ actual_duration │ requests │ throughput │ success │ min │ mean │ p50 │ p90 │ p95 │ p99 │ max │ final_wait_time
i64 │ f64 │ i64 │ f64 │ i64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64
1024 │ 1024.034649 │ 30 │ 29.998985 │ 30720 │ 1023.855585 │ 1.0 │ 0.000175 │ 0.001135 │ 0.000818 │ 0.001192 │ 0.002999 │ 0.008982 │ 0.022805 │ 0.005247
2048 │ 2048.070726 │ 30 │ 29.998964 │ 61440 │ 2048.019014 │ 1.0 │ 0.000187 │ 0.000761 │ 0.000729 │ 0.000972 │ 0.001055 │ 0.001274 │ 0.01417 │ 0.000757
4096 │ 4096.159414 │ 30 │ 29.999077 │ 122881 │ 4096.098668 │ 1.0 │ 0.000156 │ 0.000667 │ 0.000646 │ 0.000861 │ 0.000934 │ 0.001108 │ 0.009465 │ 0.000445
8192 │ 8192.056211 │ 30 │ 29.999916 │ 245761 │ 8191.877061 │ 1.0 │ 0.000161 │ 0.000615 │ 0.000586 │ 0.000779 │ 0.000851 │ 0.001107 │ 0.015939 │ 0.000656
16384 │ 16384.537386 │ 30 │ 29.999077 │ 491521 │ 16384.321238 │ 1.0 │ 0.000143 │ 0.000611 │ 0.000538 │ 0.000726 │ 0.000826 │ 0.002789 │ 0.017278 │ 0.000396
┌────────────────────────┐
│ node2 Complete Results │
└────────────────────────┘
shape: (5, 15)
target_rate │ actual_rate │ target_duration │ actual_duration │ requests │ throughput │ success │ min │ mean │ p50 │ p90 │ p95 │ p99 │ max │ final_wait_time
i64 │ f64 │ i64 │ f64 │ i64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64
1024 │ 1024.037603 │ 30 │ 29.999875 │ 30721 │ 1023.960297 │ 1.0 │ 0.000389 │ 0.003606 │ 0.003432 │ 0.005433 │ 0.00635 │ 0.00799 │ 0.041904 │ 0.002265
2048 │ 2048.067022 │ 30 │ 29.999507 │ 61441 │ 2034.508002 │ 0.993441 │ 0.000041 │ 0.018045 │ 0.003214 │ 0.005892 │ 0.007783 │ 0.624675 │ 1.141167 │ 0.00185
4096 │ 4096.153526 │ 30 │ 29.99912 │ 122881 │ 4095.432531 │ 1.0 │ 0.000345 │ 0.004692 │ 0.003098 │ 0.006213 │ 0.008207 │ 0.041268 │ 0.330131 │ 0.005281
8192 │ 8192.053227 │ 30 │ 29.999927 │ 245761 │ 7544.572886 │ 0.923796 │ 0.000034 │ 0.049605 │ 0.004963 │ 0.153912 │ 0.191897 │ 0.358046 │ 1.791245 │ 0.0923
16384 │ 16384.38957 │ 30 │ 29.999409 │ 491522 │ 8544.260016 │ 0.523625 │ 0.00003 │ 0.061987 │ 0.064327 │ 0.132717 │ 0.154209 │ 0.307765 │ 2.594034 │ 0.122919

Test: eth_getBalance

In [12]:
# load test results

test_name = 'eth_getBalance'
results_payload = results_payloads[test_name]
results = results_payload['results']
In [13]:
# show test metadata

toolstr.print_text_box(test_name + ' parameters')
flood.print_load_test_summary(results_payload['test'])
┌───────────────────────────┐
│ eth_getBalance parameters │
└───────────────────────────┘
- sample rates: [1024, 2048, 4096, 8192, 16384]
- sample duration: 30
In [14]:
# show result tables

flood.print_metric_tables(results, metrics=metrics, comparison=True)
┌─────────────────┐
│ success vs load │
└─────────────────┘
  rate (rps)  │     node1  │     node2  │  node1 / node2  
──────────────┼────────────┼────────────┼─────────────────
       1,024  │  1.000000  │  1.000000  │         100.0%  
       2,048  │  1.000000  │  1.000000  │         100.0%  
       4,096  │  1.000000  │  1.000000  │         100.0%  
       8,192  │  1.000000  │  1.000000  │         100.0%  
      16,384  │  1.000000  │  0.969753  │         103.1%  

┌────────────────────┐
│ throughput vs load │
└────────────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       1,024  │    1,023.7  │    1,024.0  │         100.0%  
       2,048  │    2,048.0  │    2,048.0  │         100.0%  
       4,096  │    4,096.0  │    4,096.1  │         100.0%  
       8,192  │    8,192.3  │    8,192.2  │         100.0%  
      16,384  │   16,384.2  │   15,888.2  │         103.1%  

┌─────────────┐
│ p50 vs load │
└─────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       1,024  │   0.000479  │   0.000730  │          65.6%  
       2,048  │   0.000413  │   0.000667  │          61.9%  
       4,096  │   0.000353  │   0.000607  │          58.1%  
       8,192  │   0.000315  │   0.000559  │          56.3%  
      16,384  │   0.000301  │   0.000541  │          55.6%  

┌─────────────┐
│ p90 vs load │
└─────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       1,024  │   0.003601  │   0.000878  │         410.3%  
       2,048  │   0.000750  │   0.000809  │          92.7%  
       4,096  │   0.000481  │   0.000745  │          64.6%  
       8,192  │   0.000424  │   0.000688  │          61.6%  
      16,384  │   0.000401  │   0.000696  │          57.6%  

┌─────────────┐
│ p99 vs load │
└─────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       1,024  │   0.010349  │   0.001086  │         952.9%  
       2,048  │   0.009045  │   0.000978  │         925.0%  
       4,096  │   0.004626  │   0.000943  │         490.5%  
       8,192  │   0.000639  │   0.001196  │          53.5%  
      16,384  │   0.001177  │   0.005867  │          20.1%  
In [15]:
# show result figures

colors = flood.get_nodes_plot_colors(nodes=results_payload['nodes'])
flood.plot_load_test_results(
    test_name=test_name,
    outputs=results,
    latency_yscale_log=True,
    colors=colors,
)
In [16]:
# show errors

toolstr.print_text_box('Error messages present in each test')
unique_errors = {}
for n, name in enumerate(results.keys()):
    unique_errors.setdefault(name, set())
    unique_errors[name] |= {
        error for error_list in results[name]['errors'] for error in error_list
    }
    print(name)
    for error in unique_errors[name]:
        print('-', error)
    if n != len(results) - 1:
        print()
┌─────────────────────────────────────┐
│ Error messages present in each test │
└─────────────────────────────────────┘
node1

node2
- Post "http://localhost:8545": dial tcp 0.0.0.0:0->127.0.0.1:8545: socket: too many open files
In [17]:
# show complete results

for name in results.keys():
    toolstr.print_text_box(name + " Complete Results")
    df = pl.DataFrame(results[name])
    df = df.drop(
        "status_codes",
        "errors",
        "first_request_timestamp",
        "last_request_timestamp",
        "last_response_timestamp",
    )
    IPython.display.display(df)
┌────────────────────────┐
│ node1 Complete Results │
└────────────────────────┘
shape: (5, 15)
target_rate │ actual_rate │ target_duration │ actual_duration │ requests │ throughput │ success │ min │ mean │ p50 │ p90 │ p95 │ p99 │ max │ final_wait_time
i64 │ f64 │ i64 │ f64 │ i64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64
1024 │ 1024.034912 │ 30 │ 29.998977 │ 30720 │ 1023.742511 │ 1.0 │ 0.000091 │ 0.0012263 │ 0.000479 │ 0.003601 │ 0.006714 │ 0.010349 │ 0.026496 │ 0.008568
2048 │ 2048.072859 │ 30 │ 29.998933 │ 61440 │ 2048.043907 │ 1.0 │ 0.000077 │ 0.000823 │ 0.000413 │ 0.00075 │ 0.004019 │ 0.009045 │ 0.020597 │ 0.000424
4096 │ 4096.076506 │ 30 │ 29.999684 │ 122881 │ 4096.049586 │ 1.0 │ 0.000073 │ 0.000448 │ 0.000353 │ 0.000481 │ 0.000538 │ 0.004626 │ 0.013759 │ 0.000197
8192 │ 8192.307058 │ 30 │ 29.998998 │ 245761 │ 8192.271223 │ 1.0 │ 0.000074 │ 0.000346 │ 0.000315 │ 0.000424 │ 0.000462 │ 0.000639 │ 0.024344 │ 0.000131
16384 │ 16384.299625 │ 30 │ 29.999512 │ 491521 │ 16384.216247 │ 1.0 │ 0.000065 │ 0.000325 │ 0.000301 │ 0.000401 │ 0.000444 │ 0.001177 │ 0.013573 │ 0.000153
┌────────────────────────┐
│ node2 Complete Results │
└────────────────────────┘
shape: (5, 15)
target_rate │ actual_rate │ target_duration │ actual_duration │ requests │ throughput │ success │ min │ mean │ p50 │ p90 │ p95 │ p99 │ max │ final_wait_time
i64 │ f64 │ i64 │ f64 │ i64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64
1024 │ 1024.036026 │ 30 │ 29.998945 │ 30720 │ 1024.017063 │ 1.0 │ 0.000181 │ 0.000731 │ 0.00073 │ 0.000878 │ 0.000928 │ 0.001086 │ 0.003974 │ 0.000556
2048 │ 2048.074928 │ 30 │ 29.998902 │ 61440 │ 2048.027791 │ 1.0 │ 0.000188 │ 0.000668 │ 0.000667 │ 0.000809 │ 0.000858 │ 0.000978 │ 0.004955 │ 0.00069
4096 │ 4096.169802 │ 30 │ 29.999001 │ 122881 │ 4096.10975 │ 1.0 │ 0.000166 │ 0.000614 │ 0.000607 │ 0.000745 │ 0.000791 │ 0.000943 │ 0.006775 │ 0.00044
8192 │ 8192.30662 │ 30 │ 29.998999 │ 245761 │ 8192.195356 │ 1.0 │ 0.000163 │ 0.000576 │ 0.000559 │ 0.000688 │ 0.000738 │ 0.001196 │ 0.009189 │ 0.000407
16384 │ 16384.049156 │ 30 │ 29.999971 │ 491521 │ 15888.162999 │ 0.969753 │ 0.000029 │ 0.002852 │ 0.000541 │ 0.000696 │ 0.000882 │ 0.005867 │ 1.120123 │ 0.000602

Test: eth_getBlockByNumber

In [18]:
# load test results

test_name = 'eth_getBlockByNumber'
results_payload = results_payloads[test_name]
results = results_payload['results']
In [19]:
# show test metadata

toolstr.print_text_box(test_name + ' parameters')
flood.print_load_test_summary(results_payload['test'])
┌─────────────────────────────────┐
│ eth_getBlockByNumber parameters │
└─────────────────────────────────┘
- sample rates: [1024, 2048, 4096, 8192, 16384]
- sample duration: 30
In [20]:
# show result tables

flood.print_metric_tables(results, metrics=metrics, comparison=True)
┌─────────────────┐
│ success vs load │
└─────────────────┘
  rate (rps)  │     node1  │     node2  │  node1 / node2  
──────────────┼────────────┼────────────┼─────────────────
       1,024  │  1.000000  │  1.000000  │         100.0%  
       2,048  │  1.000000  │  0.996452  │         100.4%  
       4,096  │  1.000000  │  1.000000  │         100.0%  
       8,192  │  0.964848  │  0.991927  │          97.3%  
      16,384  │  0.876260  │  0.729249  │         120.2%  

┌────────────────────┐
│ throughput vs load │
└────────────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       1,024  │    1,024.0  │    1,024.0  │         100.0%  
       2,048  │    2,047.9  │    2,040.4  │         100.4%  
       4,096  │    4,095.9  │    4,095.8  │         100.0%  
       8,192  │    7,903.5  │    8,125.0  │          97.3%  
      16,384  │   14,317.2  │   11,916.9  │         120.1%  

┌─────────────┐
│ p50 vs load │
└─────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       1,024  │   0.001965  │   0.002888  │          68.0%  
       2,048  │   0.001669  │   0.002794  │          59.8%  
       4,096  │   0.001570  │   0.002532  │          62.0%  
       8,192  │   0.002898  │   0.002948  │          98.3%  
      16,384  │   0.007236  │   0.021714  │          33.3%  

┌─────────────┐
│ p90 vs load │
└─────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       1,024  │   0.003452  │   0.005235  │          65.9%  
       2,048  │   0.003057  │   0.005973  │          51.2%  
       4,096  │   0.003071  │   0.005300  │          57.9%  
       8,192  │   0.142042  │   0.027483  │         516.8%  
      16,384  │   0.098172  │   0.140488  │          69.9%  

┌─────────────┐
│ p99 vs load │
└─────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       1,024  │   0.006031  │   0.008550  │          70.5%  
       2,048  │   0.005515  │   0.519520  │           1.1%  
       4,096  │   0.005952  │   0.011441  │          52.0%  
       8,192  │   0.173364  │   0.249618  │          69.5%  
      16,384  │   0.117214  │   0.546147  │          21.5%  
In [21]:
# show result figures

colors = flood.get_nodes_plot_colors(nodes=results_payload['nodes'])
flood.plot_load_test_results(
    test_name=test_name,
    outputs=results,
    latency_yscale_log=True,
    colors=colors,
)
In [22]:
# show errors

toolstr.print_text_box('Error messages present in each test')
unique_errors = {}
for n, name in enumerate(results.keys()):
    unique_errors.setdefault(name, set())
    unique_errors[name] |= {
        error for error_list in results[name]['errors'] for error in error_list
    }
    print(name)
    for error in unique_errors[name]:
        print('-', error)
    if n != len(results) - 1:
        print()
┌─────────────────────────────────────┐
│ Error messages present in each test │
└─────────────────────────────────────┘
node1
- Post "http://localhost:8545": dial tcp 0.0.0.0:0->[::1]:8545: connect: connection refused
- Post "http://localhost:8545": dial tcp 0.0.0.0:0->127.0.0.1:8545: socket: too many open files

node2
- Post "http://localhost:8545": dial tcp 0.0.0.0:0->127.0.0.1:8545: socket: too many open files
- Post "http://localhost:8545": dial tcp 0.0.0.0:0->[::1]:8545: socket: too many open files
In [23]:
# show complete results

for name in results.keys():
    toolstr.print_text_box(name + " Complete Results")
    df = pl.DataFrame(results[name])
    df = df.drop(
        "status_codes",
        "errors",
        "first_request_timestamp",
        "last_request_timestamp",
        "last_response_timestamp",
    )
    IPython.display.display(df)
┌────────────────────────┐
│ node1 Complete Results │
└────────────────────────┘
shape: (5, 15)
target_rate │ actual_rate │ target_duration │ actual_duration │ requests │ throughput │ success │ min │ mean │ p50 │ p90 │ p95 │ p99 │ max │ final_wait_time
i64 │ f64 │ i64 │ f64 │ i64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64
1024 │ 1024.034469 │ 30 │ 29.99899 │ 30720 │ 1023.987968 │ 1.0 │ 0.000144 │ 0.002124 │ 0.001965 │ 0.003452 │ 0.004235 │ 0.006031 │ 0.020968 │ 0.001362
2048 │ 2048.072043 │ 30 │ 29.998945 │ 61440 │ 2047.87472 │ 1.0 │ 0.000132 │ 0.001811 │ 0.001669 │ 0.003057 │ 0.003924 │ 0.005515 │ 0.020103 │ 0.002891
4096 │ 4096.166555 │ 30 │ 29.999024 │ 122881 │ 4095.934943 │ 1.0 │ 0.000111 │ 0.001745 │ 0.00157 │ 0.003071 │ 0.004116 │ 0.005952 │ 0.025322 │ 0.001696
8192 │ 8192.300859 │ 30 │ 29.99902 │ 245761 │ 7903.52408 │ 0.964848 │ 0.000033 │ 0.040538 │ 0.002898 │ 0.142042 │ 0.155243 │ 0.173364 │ 0.217501 │ 0.003039
16384 │ 16384.155279 │ 30 │ 29.999777 │ 491521 │ 14317.240139 │ 0.87626 │ 0.00003 │ 0.028631 │ 0.007236 │ 0.098172 │ 0.105491 │ 0.117214 │ 0.243051 │ 0.082837
┌────────────────────────┐
│ node2 Complete Results │
└────────────────────────┘
shape: (5, 15)
target_rate │ actual_rate │ target_duration │ actual_duration │ requests │ throughput │ success │ min │ mean │ p50 │ p90 │ p95 │ p99 │ max │ final_wait_time
i64 │ f64 │ i64 │ f64 │ i64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64
1024 │ 1024.035066 │ 30 │ 29.998973 │ 30720 │ 1023.971114 │ 1.0 │ 0.000333 │ 0.003071 │ 0.002888 │ 0.005235 │ 0.006148 │ 0.00855 │ 0.029408 │ 0.001874
2048 │ 2048.068432 │ 30 │ 29.999486 │ 61441 │ 2040.42656 │ 0.996452 │ 0.000045 │ 0.017823 │ 0.002794 │ 0.005973 │ 0.009526 │ 0.51952 │ 1.20921 │ 0.005515
4096 │ 4096.159903 │ 30 │ 29.999073 │ 122881 │ 4095.750997 │ 1.0 │ 0.000254 │ 0.002915 │ 0.002532 │ 0.0053 │ 0.006813 │ 0.011441 │ 0.077359 │ 0.002995
8192 │ 8192.307935 │ 30 │ 29.998994 │ 245761 │ 8125.00382 │ 0.991927 │ 0.000034 │ 0.01486 │ 0.002948 │ 0.027483 │ 0.08149 │ 0.249618 │ 1.18324 │ 0.004315
16384 │ 16384.524328 │ 30 │ 29.999162 │ 491522 │ 11916.922199 │ 0.729249 │ 0.000031 │ 0.061674 │ 0.021714 │ 0.140488 │ 0.336979 │ 0.546147 │ 3.603876 │ 0.079242

Test: eth_getCode

In [24]:
# load test results

test_name = 'eth_getCode'
results_payload = results_payloads[test_name]
results = results_payload['results']
In [25]:
# show test metadata

toolstr.print_text_box(test_name + ' parameters')
flood.print_load_test_summary(results_payload['test'])
┌────────────────────────┐
│ eth_getCode parameters │
└────────────────────────┘
- sample rates: [1024, 2048, 4096, 8192, 16384]
- sample duration: 30
In [26]:
# show result tables

flood.print_metric_tables(results, metrics=metrics, comparison=True)
┌─────────────────┐
│ success vs load │
└─────────────────┘
  rate (rps)  │     node1  │     node2  │  node1 / node2  
──────────────┼────────────┼────────────┼─────────────────
       1,024  │  1.000000  │  1.000000  │         100.0%  
       2,048  │  1.000000  │  1.000000  │         100.0%  
       4,096  │  1.000000  │  1.000000  │         100.0%  
       8,192  │  1.000000  │  0.989986  │         101.0%  
      16,384  │  1.000000  │  1.000000  │         100.0%  

┌────────────────────┐
│ throughput vs load │
└────────────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       1,024  │    1,024.0  │    1,024.0  │         100.0%  
       2,048  │    2,048.0  │    2,048.0  │         100.0%  
       4,096  │    4,096.1  │    4,096.1  │         100.0%  
       8,192  │    8,192.3  │    8,110.1  │         101.0%  
      16,384  │   16,384.0  │   16,383.8  │         100.0%  

┌─────────────┐
│ p50 vs load │
└─────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       1,024  │   0.000524  │   0.000446  │         117.5%  
       2,048  │   0.000452  │   0.000413  │         109.6%  
       4,096  │   0.000375  │   0.000377  │          99.6%  
       8,192  │   0.000330  │   0.000350  │          94.3%  
      16,384  │   0.000316  │   0.000307  │         102.8%  

┌─────────────┐
│ p90 vs load │
└─────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       1,024  │   0.000795  │   0.000625  │         127.2%  
       2,048  │   0.000664  │   0.000562  │         118.1%  
       4,096  │   0.000526  │   0.000515  │         102.2%  
       8,192  │   0.000455  │   0.000505  │          90.1%  
      16,384  │   0.000438  │   0.000515  │          85.0%  

┌─────────────┐
│ p99 vs load │
└─────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       1,024  │   0.008384  │   0.001358  │         617.4%  
       2,048  │   0.008427  │   0.001240  │         679.3%  
       4,096  │   0.003650  │   0.001159  │         315.0%  
       8,192  │   0.000697  │   0.028117  │           2.5%  
      16,384  │   0.001708  │   0.002716  │          62.9%  
In [27]:
# show result figures

colors = flood.get_nodes_plot_colors(nodes=results_payload['nodes'])
flood.plot_load_test_results(
    test_name=test_name,
    outputs=results,
    latency_yscale_log=True,
    colors=colors,
)
In [28]:
# show errors

toolstr.print_text_box('Error messages present in each test')
unique_errors = {}
for n, name in enumerate(results.keys()):
    unique_errors.setdefault(name, set())
    unique_errors[name] |= {
        error for error_list in results[name]['errors'] for error in error_list
    }
    print(name)
    for error in unique_errors[name]:
        print('-', error)
    if n != len(results) - 1:
        print()
┌─────────────────────────────────────┐
│ Error messages present in each test │
└─────────────────────────────────────┘
node1

node2
- Post "http://localhost:8545": dial tcp 0.0.0.0:0->127.0.0.1:8545: socket: too many open files
In [29]:
# show complete results

for name in results.keys():
    toolstr.print_text_box(name + " Complete Results")
    df = pl.DataFrame(results[name])
    df = df.drop(
        "status_codes",
        "errors",
        "first_request_timestamp",
        "last_request_timestamp",
        "last_response_timestamp",
    )
    IPython.display.display(df)
┌────────────────────────┐
│ node1 Complete Results │
└────────────────────────┘
shape: (5, 15)
target_rate │ actual_rate │ target_duration │ actual_duration │ requests │ throughput │ success │ min │ mean │ p50 │ p90 │ p95 │ p99 │ max │ final_wait_time
i64 │ f64 │ i64 │ f64 │ i64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64
1024 │ 1024.036342 │ 30 │ 29.998935 │ 30720 │ 1024.020901 │ 1.0 │ 0.000093 │ 0.000797 │ 0.000524 │ 0.000795 │ 0.001454 │ 0.008384 │ 0.02041 │ 0.000452
2048 │ 2048.070026 │ 30 │ 29.998974 │ 61440 │ 2048.034756 │ 1.0 │ 0.000092 │ 0.000735 │ 0.000452 │ 0.000664 │ 0.001692 │ 0.008427 │ 0.029878 │ 0.000517
4096 │ 4096.170924 │ 30 │ 29.998992 │ 122881 │ 4096.133318 │ 1.0 │ 0.000068 │ 0.000461 │ 0.000375 │ 0.000526 │ 0.000599 │ 0.00365 │ 0.014708 │ 0.000275
8192 │ 8192.304049 │ 30 │ 29.999009 │ 245761 │ 8192.270212 │ 1.0 │ 0.000075 │ 0.000346 │ 0.00033 │ 0.000455 │ 0.000508 │ 0.000697 │ 0.015088 │ 0.000124
16384 │ 16384.06597 │ 30 │ 30.000001 │ 491522 │ 16383.966931 │ 1.0 │ 0.00007 │ 0.000353 │ 0.000316 │ 0.000438 │ 0.000508 │ 0.001708 │ 0.021035 │ 0.000181
┌────────────────────────┐
│ node2 Complete Results │
└────────────────────────┘
shape: (5, 15)
target_rate │ actual_rate │ target_duration │ actual_duration │ requests │ throughput │ success │ min │ mean │ p50 │ p90 │ p95 │ p99 │ max │ final_wait_time
i64 │ f64 │ i64 │ f64 │ i64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64
1024 │ 1024.03594 │ 30 │ 29.998947 │ 30720 │ 1024.021033 │ 1.0 │ 0.000155 │ 0.00048 │ 0.000446 │ 0.000625 │ 0.000727 │ 0.001358 │ 0.005754 │ 0.000437
2048 │ 2048.050042 │ 30 │ 29.999267 │ 61440 │ 2048.015494 │ 1.0 │ 0.000154 │ 0.000444 │ 0.000413 │ 0.000562 │ 0.000643 │ 0.00124 │ 0.006002 │ 0.000506
4096 │ 4096.166335 │ 30 │ 29.999026 │ 122881 │ 4096.088277 │ 1.0 │ 0.000145 │ 0.000407 │ 0.000377 │ 0.000515 │ 0.000587 │ 0.001159 │ 0.005144 │ 0.000572
8192 │ 8192.307241 │ 30 │ 29.998997 │ 245761 │ 8110.123711 │ 0.989986 │ 0.000041 │ 0.003031 │ 0.00035 │ 0.000505 │ 0.000656 │ 0.028117 │ 20.749578 │ 0.000545
16384 │ 16384.055208 │ 30 │ 30.000021 │ 491522 │ 16383.81852 │ 1.0 │ 0.000131 │ 0.000398 │ 0.000307 │ 0.000515 │ 0.000747 │ 0.002716 │ 0.020893 │ 0.000433

Test: eth_getLogs

In [30]:
# load test results

test_name = 'eth_getLogs'
results_payload = results_payloads[test_name]
results = results_payload['results']
In [31]:
# show test metadata

toolstr.print_text_box(test_name + ' parameters')
flood.print_load_test_summary(results_payload['test'])
┌────────────────────────┐
│ eth_getLogs parameters │
└────────────────────────┘
- sample rates: [64, 128, 256, 512, 1024]
- sample duration: 30
In [32]:
# show result tables

flood.print_metric_tables(results, metrics=metrics, comparison=True)
┌─────────────────┐
│ success vs load │
└─────────────────┘
  rate (rps)  │     node1  │     node2  │  node1 / node2  
──────────────┼────────────┼────────────┼─────────────────
          64  │  1.000000  │  1.000000  │         100.0%  
         128  │  0.476823  │  0.909635  │          52.4%  
         256  │  0.251953  │  0.443359  │          56.8%  
         512  │  0.143034  │  0.221680  │          64.5%  
       1,024  │  0.066309  │  0.108008  │          61.4%  

┌────────────────────┐
│ throughput vs load │
└────────────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
          64  │       59.2  │       63.9  │          92.7%  
         128  │       41.3  │       87.7  │          47.1%  
         256  │       41.0  │       84.9  │          48.3%  
         512  │       46.7  │       81.2  │          57.4%  
       1,024  │       42.6  │       81.8  │          52.1%  

┌─────────────┐
│ p50 vs load │
└─────────────┘
  rate (rps)  │    node1 (s)  │    node2 (s)  │  node1 / node2  
──────────────┼───────────────┼───────────────┼─────────────────
          64  │     3.532957  │     0.090051  │       3,923.3%  
         128  │     0.000213  │     7.889392  │           0.0%  
         256  │     0.000133  │     0.000138  │          95.8%  
         512  │     0.000121  │     0.000103  │         117.6%  
       1,024  │  9.212500e-5  │  9.598800e-5  │          96.0%  

┌─────────────┐
│ p90 vs load │
└─────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
          64  │   4.218673  │   0.296781  │       1,421.5%  
         128  │  23.773954  │  11.035591  │         215.4%  
         256  │  23.241839  │  13.138007  │         176.9%  
         512  │  19.412188  │  11.928917  │         162.7%  
       1,024  │   0.000171  │   4.643883  │           0.0%  

┌─────────────┐
│ p99 vs load │
└─────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
          64  │   4.495095  │   0.542333  │         828.8%  
         128  │  25.681307  │  16.724409  │         153.6%  
         256  │  27.044285  │  15.576430  │         173.6%  
         512  │  24.949272  │  15.264888  │         163.4%  
       1,024  │  27.304354  │  14.093652  │         193.7%  
In [33]:
# show result figures

colors = flood.get_nodes_plot_colors(nodes=results_payload['nodes'])
flood.plot_load_test_results(
    test_name=test_name,
    outputs=results,
    latency_yscale_log=True,
    colors=colors,
)
In [34]:
# show errors

toolstr.print_text_box('Error messages present in each test')
unique_errors = {}
for n, name in enumerate(results.keys()):
    unique_errors.setdefault(name, set())
    unique_errors[name] |= {
        error for error_list in results[name]['errors'] for error in error_list
    }
    print(name)
    for error in unique_errors[name]:
        print('-', error)
    if n != len(results) - 1:
        print()
┌─────────────────────────────────────┐
│ Error messages present in each test │
└─────────────────────────────────────┘
node1
- Post "http://localhost:8545": dial tcp 0.0.0.0:0->127.0.0.1:8545: socket: too many open files

node2
- Post "http://localhost:8545": dial tcp 0.0.0.0:0->127.0.0.1:8545: socket: too many open files
In [35]:
# show complete results

for name in results.keys():
    toolstr.print_text_box(name + " Complete Results")
    df = pl.DataFrame(results[name])
    df = df.drop(
        "status_codes",
        "errors",
        "first_request_timestamp",
        "last_request_timestamp",
        "last_response_timestamp",
    )
    IPython.display.display(df)
┌────────────────────────┐
│ node1 Complete Results │
└────────────────────────┘
shape: (5, 15)
target_rate │ actual_rate │ target_duration │ actual_duration │ requests │ throughput │ success │ min │ mean │ p50 │ p90 │ p95 │ p99 │ max │ final_wait_time
i64 │ f64 │ i64 │ f64 │ i64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64
64 │ 64.033467 │ 30 │ 29.984321 │ 1920 │ 59.182625 │ 1.0 │ 0.128018 │ 3.302701 │ 3.532957 │ 4.218673 │ 4.348479 │ 4.495095 │ 4.649294 │ 2.457634
128 │ 128.033368 │ 30 │ 29.992181 │ 3840 │ 41.329002 │ 0.476823 │ 0.000059 │ 9.081337 │ 0.000213 │ 23.773954 │ 24.773321 │ 25.681307 │ 25.990173 │ 14.310848
256 │ 256.035307 │ 30 │ 29.995863 │ 7680 │ 41.013269 │ 0.251953 │ 0.000048 │ 5.440867 │ 0.000133 │ 23.241839 │ 25.418201 │ 27.044285 │ 27.894035 │ 17.18399
512 │ 512.035712 │ 30 │ 29.997908 │ 15360 │ 46.66967 │ 0.143034 │ 0.000042 │ 2.833886 │ 0.000121 │ 19.412188 │ 22.771363 │ 24.949272 │ 26.834179 │ 17.077634
1024 │ 1024.033475 │ 30 │ 29.999019 │ 30720 │ 42.61966 │ 0.066309 │ 0.000036 │ 1.496592 │ 0.000092 │ 0.000171 │ 19.017323 │ 27.304354 │ 29.072592 │ 17.795825
┌────────────────────────┐
│ node2 Complete Results │
└────────────────────────┘
shape: (5, 15)
target_rate │ actual_rate │ target_duration │ actual_duration │ requests │ throughput │ success │ min │ mean │ p50 │ p90 │ p95 │ p99 │ max │ final_wait_time
i64 │ f64 │ i64 │ f64 │ i64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64
64 │ 64.033437 │ 30 │ 29.984335 │ 1920 │ 63.868821 │ 1.0 │ 0.010416 │ 0.13537 │ 0.090051 │ 0.296781 │ 0.425366 │ 0.542333 │ 0.693249 │ 0.077282
128 │ 128.032629 │ 30 │ 29.992355 │ 3840 │ 87.714114 │ 0.909635 │ 0.000079 │ 6.972726 │ 7.889392 │ 11.035591 │ 12.037361 │ 16.724409 │ 18.1735 │ 9.830199
256 │ 256.035217 │ 30 │ 29.995874 │ 7680 │ 84.909048 │ 0.443359 │ 0.000068 │ 4.430897 │ 0.000138 │ 13.138007 │ 14.155629 │ 15.57643 │ 17.285834 │ 10.10586
512 │ 512.036333 │ 30 │ 29.997871 │ 15360 │ 81.244327 │ 0.22168 │ 0.00006 │ 2.362849 │ 0.000103 │ 11.928917 │ 13.427853 │ 15.264888 │ 17.63882 │ 11.912748
1024 │ 1024.073931 │ 30 │ 29.997834 │ 30720 │ 81.788391 │ 0.108008 │ 0.000048 │ 1.174485 │ 0.000096 │ 4.643883 │ 12.250568 │ 14.093652 │ 17.155585 │ 10.57027

Test: eth_getStorageAt

In [36]:
# load test results

test_name = 'eth_getStorageAt'
results_payload = results_payloads[test_name]
results = results_payload['results']
In [37]:
# show test metadata

toolstr.print_text_box(test_name + ' parameters')
flood.print_load_test_summary(results_payload['test'])
┌─────────────────────────────┐
│ eth_getStorageAt parameters │
└─────────────────────────────┘
- sample rates: [1024, 2048, 4096, 8192, 16384]
- sample duration: 30
In [38]:
# show result tables

flood.print_metric_tables(results, metrics=metrics, comparison=True)
┌─────────────────┐
│ success vs load │
└─────────────────┘
  rate (rps)  │     node1  │     node2  │  node1 / node2  
──────────────┼────────────┼────────────┼─────────────────
       1,024  │  1.000000  │  1.000000  │         100.0%  
       2,048  │  1.000000  │  1.000000  │         100.0%  
       4,096  │  1.000000  │  1.000000  │         100.0%  
       8,192  │  1.000000  │  1.000000  │         100.0%  
      16,384  │  1.000000  │  0.968459  │         103.3%  

┌────────────────────┐
│ throughput vs load │
└────────────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       1,024  │    1,024.0  │    1,024.0  │         100.0%  
       2,048  │    2,048.1  │    2,048.1  │         100.0%  
       4,096  │    4,096.0  │    4,096.1  │         100.0%  
       8,192  │    8,192.3  │    8,192.1  │         100.0%  
      16,384  │   16,383.8  │   15,866.9  │         103.3%  

┌─────────────┐
│ p50 vs load │
└─────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       1,024  │   0.000582  │   0.000879  │          66.1%  
       2,048  │   0.000465  │   0.000760  │          61.1%  
       4,096  │   0.000390  │   0.000681  │          57.3%  
       8,192  │   0.000342  │   0.000623  │          54.8%  
      16,384  │   0.000321  │   0.000580  │          55.3%  

┌─────────────┐
│ p90 vs load │
└─────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       1,024  │   0.003094  │   0.001319  │         234.7%  
       2,048  │   0.000766  │   0.001093  │          70.1%  
       4,096  │   0.000579  │   0.000986  │          58.7%  
       8,192  │   0.000511  │   0.000922  │          55.4%  
      16,384  │   0.000492  │   0.000920  │          53.5%  

┌─────────────┐
│ p99 vs load │
└─────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       1,024  │   0.010970  │   0.001684  │         651.2%  
       2,048  │   0.008568  │   0.001473  │         581.7%  
       4,096  │   0.001437  │   0.001330  │         108.0%  
       8,192  │   0.000741  │   0.001416  │          52.3%  
      16,384  │   0.002467  │   0.005415  │          45.6%  
In [39]:
# show result figures

colors = flood.get_nodes_plot_colors(nodes=results_payload['nodes'])
flood.plot_load_test_results(
    test_name=test_name,
    outputs=results,
    latency_yscale_log=True,
    colors=colors,
)
In [40]:
# show errors

toolstr.print_text_box('Error messages present in each test')
unique_errors = {}
for n, name in enumerate(results.keys()):
    unique_errors.setdefault(name, set())
    unique_errors[name] |= {
        error for error_list in results[name]['errors'] for error in error_list
    }
    print(name)
    for error in unique_errors[name]:
        print('-', error)
    if n != len(results) - 1:
        print()
┌─────────────────────────────────────┐
│ Error messages present in each test │
└─────────────────────────────────────┘
node1

node2
- Post "http://localhost:8545": dial tcp 0.0.0.0:0->127.0.0.1:8545: socket: too many open files
In [41]:
# show complete results

for name in results.keys():
    toolstr.print_text_box(name + " Complete Results")
    df = pl.DataFrame(results[name])
    df = df.drop(
        "status_codes",
        "errors",
        "first_request_timestamp",
        "last_request_timestamp",
        "last_response_timestamp",
    )
    IPython.display.display(df)
┌────────────────────────┐
│ node1 Complete Results │
└────────────────────────┘
shape: (5, 15)
target_rate │ actual_rate │ target_duration │ actual_duration │ requests │ throughput │ success │ min │ mean │ p50 │ p90 │ p95 │ p99 │ max │ final_wait_time
i64 │ f64 │ i64 │ f64 │ i64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64
1024 │ 1024.03477 │ 30 │ 29.998981 │ 30720 │ 1024.01733 │ 1.0 │ 0.000143 │ 0.001298 │ 0.000582 │ 0.003094 │ 0.006706 │ 0.01097 │ 0.028763 │ 0.000511
2048 │ 2048.076149 │ 30 │ 29.999373 │ 61441 │ 2048.062002 │ 1.0 │ 0.000078 │ 0.000775 │ 0.000465 │ 0.000766 │ 0.001992 │ 0.008568 │ 0.031615 │ 0.000207
4096 │ 4096.043416 │ 30 │ 29.999926 │ 122881 │ 4096.020479 │ 1.0 │ 0.000086 │ 0.000457 │ 0.00039 │ 0.000579 │ 0.000654 │ 0.001437 │ 0.013273 │ 0.000168
8192 │ 8192.312644 │ 30 │ 29.998977 │ 245761 │ 8192.26689 │ 1.0 │ 0.000064 │ 0.000369 │ 0.000342 │ 0.000511 │ 0.000566 │ 0.000741 │ 0.008805 │ 0.000168
16384 │ 16384.027426 │ 30 │ 30.000011 │ 491521 │ 16383.837473 │ 1.0 │ 0.000068 │ 0.0004 │ 0.000321 │ 0.000492 │ 0.000586 │ 0.002467 │ 0.035574 │ 0.000348
┌────────────────────────┐
│ node2 Complete Results │
└────────────────────────┘
shape: (5, 15)
target_rate │ actual_rate │ target_duration │ actual_duration │ requests │ throughput │ success │ min │ mean │ p50 │ p90 │ p95 │ p99 │ max │ final_wait_time
i64 │ f64 │ i64 │ f64 │ i64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64
1024 │ 1024.034684 │ 30 │ 29.998984 │ 30720 │ 1024.004709 │ 1.0 │ 0.000183 │ 0.000895 │ 0.000879 │ 0.001319 │ 0.001474 │ 0.001684 │ 0.004519 │ 0.000878
2048 │ 2048.075103 │ 30 │ 29.9989 │ 61440 │ 2048.055635 │ 1.0 │ 0.000157 │ 0.00076 │ 0.00076 │ 0.001093 │ 0.001212 │ 0.001473 │ 0.005923 │ 0.000285
4096 │ 4096.148538 │ 30 │ 29.999156 │ 122881 │ 4096.08191 │ 1.0 │ 0.000174 │ 0.000683 │ 0.000681 │ 0.000986 │ 0.001077 │ 0.00133 │ 0.007217 │ 0.000488
8192 │ 8192.30522 │ 30 │ 29.999004 │ 245761 │ 8192.132836 │ 1.0 │ 0.000168 │ 0.000641 │ 0.000623 │ 0.000922 │ 0.001019 │ 0.001416 │ 0.01058 │ 0.000631
16384 │ 16384.054747 │ 30 │ 30.000022 │ 491522 │ 15866.887168 │ 0.968459 │ 0.000032 │ 0.002849 │ 0.00058 │ 0.00092 │ 0.001102 │ 0.005415 │ 1.096036 │ 0.000759

Test: eth_getTransactionByHash

In [42]:
# load test results

test_name = 'eth_getTransactionByHash'
results_payload = results_payloads[test_name]
results = results_payload['results']
In [43]:
# show test metadata

toolstr.print_text_box(test_name + ' parameters')
flood.print_load_test_summary(results_payload['test'])
┌─────────────────────────────────────┐
│ eth_getTransactionByHash parameters │
└─────────────────────────────────────┘
- sample rates: [1024, 2048, 4096, 8192, 16384]
- sample duration: 30
In [44]:
# show result tables

flood.print_metric_tables(results, metrics=metrics, comparison=True)
┌─────────────────┐
│ success vs load │
└─────────────────┘
  rate (rps)  │     node1  │     node2  │  node1 / node2  
──────────────┼────────────┼────────────┼─────────────────
       1,024  │  1.000000  │  1.000000  │         100.0%  
       2,048  │  1.000000  │  0.967465  │         103.4%  
       4,096  │  1.000000  │  0.995361  │         100.5%  
       8,192  │  1.000000  │  0.947414  │         105.6%  
      16,384  │  1.000000  │  0.516398  │         193.6%  

┌────────────────────┐
│ throughput vs load │
└────────────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       1,024  │    1,024.0  │    1,023.8  │         100.0%  
       2,048  │    2,048.0  │    1,981.1  │         103.4%  
       4,096  │    4,096.1  │    4,076.2  │         100.5%  
       8,192  │    8,191.9  │    7,757.8  │         105.6%  
      16,384  │   16,384.3  │    8,387.0  │         195.4%  

┌─────────────┐
│ p50 vs load │
└─────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       1,024  │   0.000840  │   0.006718  │          12.5%  
       2,048  │   0.000713  │   0.005206  │          13.7%  
       4,096  │   0.000636  │   0.004523  │          14.1%  
       8,192  │   0.000577  │   0.006928  │           8.3%  
      16,384  │   0.000522  │   0.006506  │           8.0%  

┌─────────────┐
│ p90 vs load │
└─────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       1,024  │   0.001178  │   0.008894  │          13.2%  
       2,048  │   0.000931  │   0.007356  │          12.7%  
       4,096  │   0.000828  │   0.008378  │           9.9%  
       8,192  │   0.000767  │   0.104966  │           0.7%  
      16,384  │   0.000720  │   0.093613  │           0.8%  

┌─────────────┐
│ p99 vs load │
└─────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       1,024  │   0.008913  │   0.011596  │          76.9%  
       2,048  │   0.001432  │   0.925057  │           0.2%  
       4,096  │   0.001195  │   0.315631  │           0.4%  
       8,192  │   0.001845  │   0.692841  │           0.3%  
      16,384  │   0.002342  │   0.938341  │           0.2%  
In [45]:
# show result figures

colors = flood.get_nodes_plot_colors(nodes=results_payload['nodes'])
flood.plot_load_test_results(
    test_name=test_name,
    outputs=results,
    latency_yscale_log=True,
    colors=colors,
)
In [46]:
# show errors

toolstr.print_text_box('Error messages present in each test')
unique_errors = {}
for n, name in enumerate(results.keys()):
    unique_errors.setdefault(name, set())
    unique_errors[name] |= {
        error for error_list in results[name]['errors'] for error in error_list
    }
    print(name)
    for error in unique_errors[name]:
        print('-', error)
    if n != len(results) - 1:
        print()
┌─────────────────────────────────────┐
│ Error messages present in each test │
└─────────────────────────────────────┘
node1

node2
- Post "http://localhost:8545": dial tcp 0.0.0.0:0->127.0.0.1:8545: socket: too many open files
In [47]:
# show complete results

for name in results.keys():
    toolstr.print_text_box(name + " Complete Results")
    df = pl.DataFrame(results[name])
    df = df.drop(
        "status_codes",
        "errors",
        "first_request_timestamp",
        "last_request_timestamp",
        "last_response_timestamp",
    )
    IPython.display.display(df)
┌────────────────────────┐
│ node1 Complete Results │
└────────────────────────┘
shape: (5, 15)
target_rate │ actual_rate │ target_duration │ actual_duration │ requests │ throughput │ success │ min │ mean │ p50 │ p90 │ p95 │ p99 │ max │ final_wait_time
i64 │ f64 │ i64 │ f64 │ i64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64
1024 │ 1024.034048 │ 30 │ 29.999003 │ 30720 │ 1024.001046 │ 1.0 │ 0.000194 │ 0.001137 │ 0.00084 │ 0.001178 │ 0.002568 │ 0.008913 │ 0.022227 │ 0.000967
2048 │ 2048.070332 │ 30 │ 29.99897 │ 61440 │ 2048.024024 │ 1.0 │ 0.000166 │ 0.00075 │ 0.000713 │ 0.000931 │ 0.001014 │ 0.001432 │ 0.012117 │ 0.000678
4096 │ 4096.149188 │ 30 │ 29.999151 │ 122881 │ 4096.112356 │ 1.0 │ 0.000156 │ 0.000658 │ 0.000636 │ 0.000828 │ 0.000904 │ 0.001195 │ 0.009878 │ 0.00027
8192 │ 8192.085065 │ 30 │ 29.999811 │ 245761 │ 8191.947472 │ 1.0 │ 0.000131 │ 0.000619 │ 0.000577 │ 0.000767 │ 0.000844 │ 0.001845 │ 0.020212 │ 0.000504
16384 │ 16384.607304 │ 30 │ 29.998949 │ 491521 │ 16384.335046 │ 1.0 │ 0.000136 │ 0.000584 │ 0.000522 │ 0.00072 │ 0.000826 │ 0.002342 │ 0.019797 │ 0.000498
┌────────────────────────┐
│ node2 Complete Results │
└────────────────────────┘
shape: (5, 15)
target_rate │ actual_rate │ target_duration │ actual_duration │ requests │ throughput │ success │ min │ mean │ p50 │ p90 │ p95 │ p99 │ max │ final_wait_time
i64 │ f64 │ i64 │ f64 │ i64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64
1024 │ 1024.034439 │ 30 │ 29.998991 │ 30720 │ 1023.785899 │ 1.0 │ 0.001142 │ 0.006833 │ 0.006718 │ 0.008894 │ 0.009695 │ 0.011596 │ 0.028355 │ 0.007283
2048 │ 2048.03288 │ 30 │ 30.000007 │ 61441 │ 1981.073879 │ 0.967465 │ 0.000036 │ 0.035731 │ 0.005206 │ 0.007356 │ 0.022063 │ 0.925057 │ 1.250731 │ 0.004932
4096 │ 4096.141445 │ 30 │ 29.999208 │ 122881 │ 4076.236956 │ 0.995361 │ 0.000043 │ 0.014502 │ 0.004523 │ 0.008378 │ 0.033258 │ 0.315631 │ 1.19056 │ 0.006653
8192 │ 8189.705568 │ 30 │ 30.002666 │ 245713 │ 7757.840534 │ 0.947414 │ 0.000038 │ 0.045322 │ 0.006928 │ 0.104966 │ 0.19557 │ 0.692841 │ 2.017775 │ 0.004654
16384 │ 16384.058108 │ 30 │ 30.000016 │ 491522 │ 8387.016522 │ 0.516398 │ 0.000029 │ 0.062181 │ 0.006506 │ 0.093613 │ 0.221756 │ 0.938341 │ 7.924346 │ 0.263547

Test: eth_getTransactionCount

In [48]:
# load test results

test_name = 'eth_getTransactionCount'
results_payload = results_payloads[test_name]
results = results_payload['results']
In [49]:
# show test metadata

toolstr.print_text_box(test_name + ' parameters')
flood.print_load_test_summary(results_payload['test'])
┌────────────────────────────────────┐
│ eth_getTransactionCount parameters │
└────────────────────────────────────┘
- sample rates: [2048, 4096, 8192, 16384, 32768]
- sample duration: 30
In [50]:
# show result tables

flood.print_metric_tables(results, metrics=metrics, comparison=True)
┌─────────────────┐
│ success vs load │
└─────────────────┘
  rate (rps)  │     node1  │     node2  │  node1 / node2  
──────────────┼────────────┼────────────┼─────────────────
       2,048  │  0.000000  │  1.000000  │           0.0%  
       4,096  │  0.000000  │  1.000000  │           0.0%  
       8,192  │  0.000000  │  1.000000  │           0.0%  
      16,384  │  0.000000  │  1.000000  │           0.0%  
      32,768  │  0.000000  │  0.954081  │           0.0%  

┌────────────────────┐
│ throughput vs load │
└────────────────────┘
  rate (rps)  │  node1 (s)  │      node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────────┼─────────────────
       2,048  │   0.000000  │   2,048.046077  │           0.0%  
       4,096  │   0.000000  │   4,096.101055  │           0.0%  
       8,192  │   0.000000  │   8,192.005149  │           0.0%  
      16,384  │   0.000000  │  16,383.825278  │           0.0%  
      32,768  │   0.000000  │  31,263.286588  │           0.0%  

┌─────────────┐
│ p50 vs load │
└─────────────┘
  rate (rps)  │    node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼───────────────┼─────────────┼─────────────────
       2,048  │  9.637500e-5  │   0.000418  │          23.0%  
       4,096  │  8.422800e-5  │   0.000388  │          21.7%  
       8,192  │  8.260400e-5  │   0.000367  │          22.5%  
      16,384  │  8.930300e-5  │   0.000336  │          26.6%  
      32,768  │  7.757300e-5  │   0.000299  │          25.9%  

┌─────────────┐
│ p90 vs load │
└─────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       2,048  │   0.000136  │   0.000635  │          21.4%  
       4,096  │   0.000119  │   0.000590  │          20.1%  
       8,192  │   0.000121  │   0.000564  │          21.5%  
      16,384  │   0.000129  │   0.000545  │          23.6%  
      32,768  │   0.000118  │   0.002878  │           4.1%  

┌─────────────┐
│ p99 vs load │
└─────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       2,048  │  30.000114  │   0.000823  │   3,646,224.4%  
       4,096  │   5.663850  │   0.000774  │     731,378.1%  
       8,192  │   0.000184  │   0.000944  │          19.4%  
      16,384  │   0.000211  │   0.002881  │           7.3%  
      32,768  │   0.000231  │   0.032226  │           0.7%  
In [51]:
# show result figures

colors = flood.get_nodes_plot_colors(nodes=results_payload['nodes'])
flood.plot_load_test_results(
    test_name=test_name,
    outputs=results,
    latency_yscale_log=True,
    colors=colors,
)
In [52]:
# show errors

toolstr.print_text_box('Error messages present in each test')
unique_errors = {}
for n, name in enumerate(results.keys()):
    unique_errors.setdefault(name, set())
    unique_errors[name] |= {
        error for error_list in results[name]['errors'] for error in error_list
    }
    print(name)
    for error in unique_errors[name]:
        print('-', error)
    if n != len(results) - 1:
        print()
┌─────────────────────────────────────┐
│ Error messages present in each test │
└─────────────────────────────────────┘
node1
- Post "http://localhost:8545": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
- Post "http://localhost:8545": dial tcp 0.0.0.0:0->[::1]:8545: connect: connection refused
- Post "http://localhost:8545": dial tcp 0.0.0.0:0->127.0.0.1:8545: socket: too many open files
- Post "http://localhost:8545": dial tcp 0.0.0.0:0->[::1]:8545: socket: too many open files

node2
- Post "http://localhost:8545": dial tcp 0.0.0.0:0->127.0.0.1:8545: socket: too many open files
- Post "http://localhost:8545": dial tcp 0.0.0.0:0->[::1]:8545: socket: too many open files
In [53]:
# show complete results

for name in results.keys():
    toolstr.print_text_box(name + " Complete Results")
    df = pl.DataFrame(results[name])
    df = df.drop(
        "status_codes",
        "errors",
        "first_request_timestamp",
        "last_request_timestamp",
        "last_response_timestamp",
    )
    IPython.display.display(df)
┌────────────────────────┐
│ node1 Complete Results │
└────────────────────────┘
shape: (5, 15)
target_rate │ actual_rate │ target_duration │ actual_duration │ requests │ throughput │ success │ min │ mean │ p50 │ p90 │ p95 │ p99 │ max │ final_wait_time
i64 │ f64 │ i64 │ f64 │ i64 │ i64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64
2048 │ 2048.074637 │ 30 │ 29.998907 │ 61440 │ 0 │ 0.0 │ 0.000034 │ 0.496678 │ 0.000096 │ 0.000136 │ 0.000157 │ 30.000114 │ 30.000297 │ 0.496723
4096 │ 4096.16261 │ 30 │ 29.999053 │ 122881 │ 0 │ 0.0 │ 0.000033 │ 0.248376 │ 0.000084 │ 0.000119 │ 0.000133 │ 5.66385 │ 30.000546 │ 0.248842
8192 │ 8192.31322 │ 30 │ 29.998975 │ 245761 │ 0 │ 0.0 │ 0.000029 │ 0.124231 │ 0.000083 │ 0.000121 │ 0.000139 │ 0.000184 │ 30.000229 │ 0.124169
16384 │ 16384.303114 │ 30 │ 29.998957 │ 491512 │ 0 │ 0.0 │ 0.000024 │ 0.062168 │ 0.000089 │ 0.000129 │ 0.000147 │ 0.000211 │ 30.002399 │ 0.089219
32768 │ 32768.835486 │ 30 │ 29.999815 │ 983059 │ 0 │ 0.0 │ 0.000024 │ 0.031131 │ 0.000078 │ 0.000118 │ 0.000135 │ 0.000231 │ 30.00135 │ 0.052987
┌────────────────────────┐
│ node2 Complete Results │
└────────────────────────┘
shape: (5, 15)
target_rate │ actual_rate │ target_duration │ actual_duration │ requests │ throughput │ success │ min │ mean │ p50 │ p90 │ p95 │ p99 │ max │ final_wait_time
i64 │ f64 │ i64 │ f64 │ i64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64
2048 │ 2048.069377 │ 30 │ 29.998984 │ 61440 │ 2048.046077 │ 1.0 │ 0.000144 │ 0.000451 │ 0.000418 │ 0.000635 │ 0.000697 │ 0.000823 │ 0.004705 │ 0.000341
4096 │ 4096.165279 │ 30 │ 29.999034 │ 122881 │ 4096.101055 │ 1.0 │ 0.000146 │ 0.000421 │ 0.000388 │ 0.00059 │ 0.000648 │ 0.000774 │ 0.006868 │ 0.00047
8192 │ 8192.102436 │ 30 │ 29.999747 │ 245761 │ 8192.005149 │ 1.0 │ 0.000145 │ 0.000406 │ 0.000367 │ 0.000564 │ 0.000626 │ 0.000944 │ 0.011991 │ 0.000356
16384 │ 16384.042088 │ 30 │ 29.999984 │ 491521 │ 16383.825278 │ 1.0 │ 0.000134 │ 0.000421 │ 0.000336 │ 0.000545 │ 0.000643 │ 0.002881 │ 0.018189 │ 0.000397
32768 │ 32768.631602 │ 30 │ 30.000002 │ 983059 │ 31263.286588 │ 0.954081 │ 0.000027 │ 0.003314 │ 0.000299 │ 0.002878 │ 0.011179 │ 0.032226 │ 1.142801 │ 0.000619

Test: eth_getTransactionReceipt

In [54]:
# load test results

test_name = 'eth_getTransactionReceipt'
results_payload = results_payloads[test_name]
results = results_payload['results']
In [55]:
# show test metadata

toolstr.print_text_box(test_name + ' parameters')
flood.print_load_test_summary(results_payload['test'])
┌──────────────────────────────────────┐
│ eth_getTransactionReceipt parameters │
└──────────────────────────────────────┘
- sample rates: [1024, 2048, 4096, 8192, 16384]
- sample duration: 30
In [56]:
# show result tables

flood.print_metric_tables(results, metrics=metrics, comparison=True)
┌─────────────────┐
│ success vs load │
└─────────────────┘
  rate (rps)  │     node1  │     node2  │  node1 / node2  
──────────────┼────────────┼────────────┼─────────────────
       1,024  │  1.000000  │  1.000000  │         100.0%  
       2,048  │  1.000000  │  1.000000  │         100.0%  
       4,096  │  1.000000  │  0.989282  │         101.1%  
       8,192  │  0.012020  │  0.799451  │           1.5%  
      16,384  │  0.000000  │  0.393237  │           0.0%  

┌────────────────────┐
│ throughput vs load │
└────────────────────┘
  rate (rps)  │     node1 (s)  │     node2 (s)  │  node1 / node2  
──────────────┼────────────────┼────────────────┼─────────────────
       1,024  │  1,023.945980  │  1,023.777299  │         100.0%  
       2,048  │  2,048.004898  │  2,047.805641  │         100.0%  
       4,096  │  4,095.733749  │  4,051.067581  │         101.1%  
       8,192  │     96.831538  │  6,514.941210  │           1.5%  
      16,384  │      0.000000  │  6,416.854031  │           0.0%  

┌─────────────┐
│ p50 vs load │
└─────────────┘
  rate (rps)  │    node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼───────────────┼─────────────┼─────────────────
       1,024  │     0.001322  │   0.003390  │          39.0%  
       2,048  │     0.001235  │   0.003290  │          37.5%  
       4,096  │     0.001357  │   0.003949  │          34.4%  
       8,192  │  8.409700e-5  │   0.096340  │           0.1%  
      16,384  │  8.871700e-5  │   0.000114  │          78.2%  

┌─────────────┐
│ p90 vs load │
└─────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       1,024  │   0.002938  │   0.008271  │          35.5%  
       2,048  │   0.002742  │   0.008154  │          33.6%  
       4,096  │   0.003132  │   0.140857  │           2.2%  
       8,192  │   0.000125  │   0.219684  │           0.1%  
      16,384  │   0.000128  │   0.169721  │           0.1%  

┌─────────────┐
│ p99 vs load │
└─────────────┘
  rate (rps)  │  node1 (s)  │  node2 (s)  │  node1 / node2  
──────────────┼─────────────┼─────────────┼─────────────────
       1,024  │   0.006970  │   0.406472  │           1.7%  
       2,048  │   0.004054  │   0.060305  │           6.7%  
       4,096  │   0.005572  │   0.435537  │           1.3%  
       8,192  │   0.072671  │   0.576797  │          12.6%  
      16,384  │   0.000197  │   0.389373  │           0.1%  
In [57]:
# show result figures

colors = flood.get_nodes_plot_colors(nodes=results_payload['nodes'])
flood.plot_load_test_results(
    test_name=test_name,
    outputs=results,
    latency_yscale_log=True,
    colors=colors,
)
In [58]:
# show errors

toolstr.print_text_box('Error messages present in each test')
unique_errors = {}
for n, name in enumerate(results.keys()):
    unique_errors.setdefault(name, set())
    unique_errors[name] |= {
        error for error_list in results[name]['errors'] for error in error_list
    }
    print(name)
    for error in unique_errors[name]:
        print('-', error)
    if n != len(results) - 1:
        print()
┌─────────────────────────────────────┐
│ Error messages present in each test │
└─────────────────────────────────────┘
node1
- Post "http://localhost:8545": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
- Post "http://localhost:8545": dial tcp 0.0.0.0:0->[::1]:8545: connect: connection refused
- Post "http://localhost:8545": dial tcp 0.0.0.0:0->127.0.0.1:8545: socket: too many open files
- Post "http://localhost:8545": dial tcp 0.0.0.0:0->[::1]:8545: socket: too many open files

node2
- Post "http://localhost:8545": dial tcp 0.0.0.0:0->127.0.0.1:8545: socket: too many open files
- Post "http://localhost:8545": dial tcp 0.0.0.0:0->[::1]:8545: socket: too many open files
In [59]:
# show complete results

for name in results.keys():
    toolstr.print_text_box(name + " Complete Results")
    df = pl.DataFrame(results[name])
    df = df.drop(
        "status_codes",
        "errors",
        "first_request_timestamp",
        "last_request_timestamp",
        "last_response_timestamp",
    )
    IPython.display.display(df)
┌────────────────────────┐
│ node1 Complete Results │
└────────────────────────┘
shape: (5, 15)
target_rate │ actual_rate │ target_duration │ actual_duration │ requests │ throughput │ success │ min │ mean │ p50 │ p90 │ p95 │ p99 │ max │ final_wait_time
i64 │ f64 │ i64 │ f64 │ i64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64
1024 │ 1024.037148 │ 30 │ 29.998912 │ 30720 │ 1023.94598 │ 1.0 │ 0.000209 │ 0.001635 │ 0.001322 │ 0.002938 │ 0.003442 │ 0.00697 │ 0.035692 │ 0.002671
2048 │ 2048.04495 │ 30 │ 29.999342 │ 61440 │ 2048.004898 │ 1.0 │ 0.000173 │ 0.001445 │ 0.001235 │ 0.002742 │ 0.003196 │ 0.004054 │ 0.014466 │ 0.000587
4096 │ 4096.155473 │ 30 │ 29.999105 │ 122881 │ 4095.733749 │ 1.0 │ 0.000169 │ 0.001654 │ 0.001357 │ 0.003132 │ 0.003752 │ 0.005572 │ 0.022887 │ 0.003089
8192 │ 8192.320603 │ 30 │ 29.998948 │ 245761 │ 96.831538 │ 0.01202 │ 0.000029 │ 0.125075 │ 0.000084 │ 0.000125 │ 0.000147 │ 0.072671 │ 30.000255 │ 0.507642
16384 │ 16384.335229 │ 30 │ 29.999508 │ 491522 │ 0.0 │ 0.0 │ 0.000025 │ 0.062166 │ 0.000089 │ 0.000128 │ 0.000144 │ 0.000197 │ 30.00058 │ 0.091034
┌────────────────────────┐
│ node2 Complete Results │
└────────────────────────┘
shape: (5, 15)
target_rate │ actual_rate │ target_duration │ actual_duration │ requests │ throughput │ success │ min │ mean │ p50 │ p90 │ p95 │ p99 │ max │ final_wait_time
i64 │ f64 │ i64 │ f64 │ i64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64 │ f64
1024 │ 1024.033905 │ 30 │ 29.999007 │ 30720 │ 1023.777299 │ 1.0 │ 0.000554 │ 0.014604 │ 0.00339 │ 0.008271 │ 0.010038 │ 0.406472 │ 1.148305 │ 0.007519
2048 │ 2048.067638 │ 30 │ 29.999009 │ 61440 │ 2047.805641 │ 1.0 │ 0.000481 │ 0.005784 │ 0.00329 │ 0.008154 │ 0.010296 │ 0.060305 │ 0.410082 │ 0.003838
4096 │ 4096.154549 │ 30 │ 29.999112 │ 122881 │ 4051.067581 │ 0.989282 │ 0.000037 │ 0.03777 │ 0.003949 │ 0.140857 │ 0.229946 │ 0.435537 │ 1.563082 │ 0.00878
8192 │ 8192.059441 │ 30 │ 29.999904 │ 245761 │ 6514.94121 │ 0.799451 │ 0.000033 │ 0.110696 │ 0.09634 │ 0.219684 │ 0.285578 │ 0.576797 │ 2.113285 │ 0.157543
16384 │ 16384.032769 │ 30 │ 30.000001 │ 491521 │ 6416.854031 │ 0.393237 │ 0.000027 │ 0.062147 │ 0.000114 │ 0.169721 │ 0.218136 │ 0.389373 │ 2.87373 │ 0.121301