+{html_doc_end}"""
- # Get all unique sizes (columns) and test names (rows)
- # Using ordered dictionaries to maintain insertion order from loading, then sorting keys
- # Or simply sort the keys after extraction:
- all_sizes = sorted(list(set(size for test_data in results.values() for size in test_data.keys())))
+ all_sizes = sorted(
+ list(set(size for test_data in results.values() for size in test_data.keys()))
+ )
all_test_names = sorted(list(results.keys()))
- html_string = """
-
-
Criterion Benchmark Results
-
Each cell links to the detailed Criterion report for that specific benchmark size.
-
Note: Values shown are the midpoint of the mean confidence interval, formatted for readability.
-
-
-
-
Benchmark Name
+ table_content = """
+
Each cell links to the detailed Criterion.rs report for that specific benchmark size.
+
Note: Values shown are the midpoint of the mean confidence interval, formatted for readability.
"""
- # Add size headers
for size in all_sizes:
- html_string += f"
{html.escape(size)}
\n"
+ table_content += f"
{html.escape(size)}
\n"
- html_string += """
-
-
-
+ table_content += """
+
+
+
"""
- # Add data rows
for test_name in all_test_names:
- html_string += f"
\n"
- html_string += f"
{html.escape(test_name)}
\n"
+ table_content += f"
\n"
+ table_content += f"
{html.escape(test_name)}
\n"
- # Iterate through all possible sizes to ensure columns align
for size in all_sizes:
cell_data = results.get(test_name, {}).get(size)
- mean_value = pd.NA # Default value
- full_report_url = "#" # Default link to self or dummy
+ mean_value = pd.NA
+ full_report_url = "#"
- if cell_data and 'json' in cell_data and 'html_path_relative_to_criterion_root' in cell_data:
+ if (
+ cell_data
+ and "json" in cell_data
+ and "html_path_relative_to_criterion_root" in cell_data
+ ):
try:
- # Extract mean from JSON
- mean_data = cell_data['json'].get("mean")
+ mean_data = cell_data["json"].get("mean")
if mean_data and "confidence_interval" in mean_data:
ci = mean_data["confidence_interval"]
if "lower_bound" in ci and "upper_bound" in ci:
- lower, upper = ci["lower_bound"], ci["upper_bound"]
- if isinstance(lower, (int, float)) and isinstance(upper, (int, float)):
- mean_value = (lower + upper) / 2.0
- else:
- print(f"Warning: Non-numeric bounds for {test_name} ({size}).", file=sys.stderr)
+ lower, upper = ci["lower_bound"], ci["upper_bound"]
+ if isinstance(lower, (int, float)) and isinstance(
+ upper, (int, float)
+ ):
+ mean_value = (lower + upper) / 2.0
+ else:
+ print(
+ f"Warning: Non-numeric bounds for {test_name} ({size}).",
+ file=sys.stderr,
+ )
else:
- print(f"Warning: Missing confidence_interval bounds for {test_name} ({size}).", file=sys.stderr)
+ print(
+ f"Warning: Missing confidence_interval bounds for {test_name} ({size}).",
+ file=sys.stderr,
+ )
else:
- print(f"Warning: Missing 'mean' data for {test_name} ({size}).", file=sys.stderr)
-
- # Construct the full relative URL
- relative_report_path = cell_data['html_path_relative_to_criterion_root']
- full_report_url = f"{html_base_path}{relative_report_path}"
- # Ensure forward slashes and resolve potential double slashes if html_base_path ends in /
- full_report_url = str(Path(full_report_url)).replace('\\', '/')
+ print(
+ f"Warning: Missing 'mean' data for {test_name} ({size}).",
+ file=sys.stderr,
+ )
+ relative_report_path = cell_data[
+ "html_path_relative_to_criterion_root"
+ ]
+ joined_path = Path(html_base_path) / relative_report_path
+ full_report_url = str(joined_path).replace("\\", "/")
except Exception as e:
- print(f"Error processing cell data for {test_name} ({size}): {e}", file=sys.stderr)
- # Keep mean_value as NA and URL as '#'
+ print(
+ f"Error processing cell data for {test_name} ({size}): {e}",
+ file=sys.stderr,
+ )
- # Format the mean value for display
formatted_mean = format_nanoseconds(mean_value)
- # Create the link cell
- # Only make it a link if a valid report path was found
if full_report_url and full_report_url != "#":
- html_string += f'<td><a href="{full_report_url}">{formatted_mean}</a></td>\n'
"""
-
- return html_string
+ return f"{html_doc_start}{table_content}{html_doc_end}"
if __name__ == "__main__":
DEFAULT_CRITERION_PATH = "target/criterion"
- # Default relative path from benchmark_results.html to the criterion root on the hosted site
- # Assumes benchmark_results.html is in .../doc//benchmarks/
- # And target/criterion is copied to .../doc//target/criterion/
- # So the path from benchmarks/ to target/criterion/ is ../target/criterion/
- DEFAULT_HTML_BASE_PATH = "../target/criterion/"
+ DEFAULT_OUTPUT_FILE = "./target/criterion/index.html"
+ DEFAULT_HTML_BASE_PATH = ""
parser = argparse.ArgumentParser(
description="Load Criterion benchmark results from JSON files and generate an HTML table with links to reports."
@@ -250,52 +349,66 @@ if __name__ == "__main__":
"--criterion-dir",
type=str,
default=DEFAULT_CRITERION_PATH,
- help=f"Path to the main 'target/criterion' directory (default: {DEFAULT_CRITERION_PATH}) on the runner.",
+ help=f"Path to the main 'target/criterion' directory (default: {DEFAULT_CRITERION_PATH}) containing benchmark data.",
)
parser.add_argument(
"--html-base-path",
type=str,
default=DEFAULT_HTML_BASE_PATH,
- help=f"Relative URL path from the output HTML file to the hosted 'target/criterion' directory (default: {DEFAULT_HTML_BASE_PATH}).",
+ help=(
+ f"Prefix for HTML links to individual benchmark reports. "
+ f"This is prepended to each report's relative path (e.g., 'benchmark_name/report/index.html'). "
+ f"If the main output HTML (default: '{DEFAULT_OUTPUT_FILE}') is in the 'target/criterion/' directory, "
+ f"this should typically be empty (default: '{DEFAULT_HTML_BASE_PATH}'). "
+ ),
)
parser.add_argument(
"--output-file",
type=str,
- default="benchmark_results.html",
- help="Name of the output HTML file (default: benchmark_results.html)."
+ default=DEFAULT_OUTPUT_FILE,
+ help=f"Path to save the generated HTML summary report (default: {DEFAULT_OUTPUT_FILE}).",
)
-
args = parser.parse_args()
criterion_path = Path(args.criterion_dir)
+ output_file_path = Path(args.output_file)
+
+ try:
+ output_file_path.parent.mkdir(parents=True, exist_ok=True)
+ except OSError as e:
+ print(
+ f"Error: Could not create output directory {output_file_path.parent}: {e}",
+ file=sys.stderr,
+ )
+ sys.exit(1)
+
all_results = load_criterion_reports(criterion_path)
+ # Generate HTML output regardless of whether results were found (handles "no results" page)
+ html_output = generate_html_table_with_links(all_results, args.html_base_path)
+
if not all_results:
print("\nNo benchmark results found or loaded.")
- # Still create an empty file or a file with an error message
- try:
- with open(args.output_file, "w", encoding="utf-8") as f:
- f.write("
Criterion Benchmark Results
No benchmark results found or loaded.
")
- print(f"Created empty/error HTML file: {args.output_file}")
- except IOError as e:
- print(f"Error creating empty/error HTML file {args.output_file}: {e}", file=sys.stderr)
- sys.exit(1) # Indicate failure if no data was loaded successfully
+ # Fallthrough to write the "no results" page generated by generate_html_table_with_links
+ else:
+ print("\nSuccessfully loaded benchmark results.")
+ # pprint(all_results) # Uncomment for debugging
- print("\nSuccessfully loaded benchmark results.")
- # pprint(all_results) # Uncomment for debugging
-
- print(f"Generating HTML table with links using base path: {args.html_base_path}")
- html_output = generate_html_table_with_links(all_results, args.html_base_path)
+ print(
+ f"Generating HTML report with links using HTML base path: '{args.html_base_path}'"
+ )
try:
- with open(args.output_file, "w", encoding="utf-8") as f:
+ with output_file_path.open("w", encoding="utf-8") as f:
f.write(html_output)
- print(f"\nSuccessfully wrote HTML table to {args.output_file}")
- sys.exit(0) # Exit successfully
+ print(f"\nSuccessfully wrote HTML report to {output_file_path}")
+ if not all_results:
+ sys.exit(1) # Exit with error code if no results, though file is created
+ sys.exit(0)
except IOError as e:
- print(f"Error writing HTML output to {args.output_file}: {e}", file=sys.stderr)
+ print(f"Error writing HTML output to {output_file_path}: {e}", file=sys.stderr)
sys.exit(1)
except Exception as e:
- print(f"An unexpected error occurred while writing HTML: {e}", file=sys.stderr)
- sys.exit(1)
\ No newline at end of file
+ print(f"An unexpected error occurred while writing HTML: {e}", file=sys.stderr)
+ sys.exit(1)
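The values placed in each table cell are the midpoint of the mean confidence interval that Criterion writes to each benchmark's estimates.json, which is what the try-block above extracts before handing the number to format_nanoseconds. A minimal standalone sketch of that extraction, assuming the same nested JSON layout; the file path in the usage comment is illustrative, not taken from the patch:

    import json
    from pathlib import Path

    def mean_midpoint(estimates_path):
        """Midpoint of Criterion's mean confidence interval, in nanoseconds."""
        data = json.loads(Path(estimates_path).read_text(encoding="utf-8"))
        ci = data.get("mean", {}).get("confidence_interval", {})
        lower, upper = ci.get("lower_bound"), ci.get("upper_bound")
        if isinstance(lower, (int, float)) and isinstance(upper, (int, float)):
            return (lower + upper) / 2.0
        return None  # mirrors the pd.NA fallback used by the script

    # Hypothetical location; Criterion keeps the latest estimates under .../new/estimates.json:
    # mean_midpoint("target/criterion/matrix_add/100x100/new/estimates.json")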
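The --html-base-path option only supplies a prefix: each cell's link is that prefix joined onto the report path recorded relative to the criterion root, normalised to forward slashes. A small sketch of the joining behaviour, with illustrative report paths; the default empty prefix means the summary at target/criterion/index.html links directly into the sibling report directories:

    from pathlib import Path

    def report_link(html_base_path, relative_report_path):
        # Path joining tolerates an empty or trailing-slash prefix; replace() keeps
        # the result forward-slashed even when built on Windows.
        return str(Path(html_base_path) / relative_report_path).replace("\\", "/")

    # report_link("", "matrix_add/100x100/report/index.html")
    #   -> "matrix_add/100x100/report/index.html"
    # report_link("../target/criterion/", "matrix_add/100x100/report/index.html")
    #   -> "../target/criterion/matrix_add/100x100/report/index.html"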
From 4e74c2dcfe7b9cb1016ab611451d58ca9fdfa3ee Mon Sep 17 00:00:00 2001
From: Palash Tyagi <23239946+Magnus167@users.noreply.github.com>
Date: Wed, 7 May 2025 00:55:09 +0100
Subject: [PATCH 21/28] Refactor benchmark configurations to improve size
categorization and sampling settings
---
benches/benchmarks.rs | 111 +++++++++++++++++++++++++++---------------
1 file changed, 72 insertions(+), 39 deletions(-)
diff --git a/benches/benchmarks.rs b/benches/benchmarks.rs
index 3565dc2..bd17531 100644
--- a/benches/benchmarks.rs
+++ b/benches/benchmarks.rs
@@ -9,26 +9,36 @@ use rustframe::{
};
use std::time::Duration;
-pub fn for_short_runs() -> Criterion {
+// Define size categories
+const SIZES_SMALL: [usize; 1] = [1];
+const SIZES_MEDIUM: [usize; 3] = [100, 250, 500];
+const SIZES_LARGE: [usize; 1] = [1000];
+
+// Configuration functions for different size categories
+fn config_small_arrays() -> Criterion {
Criterion::default()
- // (samples != total iterations)
- // limits the number of statistical data points.
- .sample_size(50)
- // measurement time per sample
- .measurement_time(Duration::from_millis(2000))
- // reduce warm-up time as well for faster overall run
+ .sample_size(500) // More samples for very fast operations
+ .measurement_time(Duration::from_millis(500))
.warm_up_time(Duration::from_millis(50))
- // can make it much shorter if needed, e.g., 50ms measurement, 100ms warm-up
- // .measurement_time(Duration::from_millis(50))
- // .warm_up_time(Duration::from_millis(100))
}
-const BENCH_SIZES: [usize; 5] = [1, 100, 250, 500, 1000];
+fn config_medium_arrays() -> Criterion {
+ Criterion::default()
+ .sample_size(100)
+ .measurement_time(Duration::from_millis(2000))
+ .warm_up_time(Duration::from_millis(100))
+}
-fn bool_matrix_operations_benchmark(c: &mut Criterion) {
- let sizes = BENCH_SIZES;
+fn config_large_arrays() -> Criterion {
+ Criterion::default()
+ .sample_size(50)
+ .measurement_time(Duration::from_millis(5000))
+ .warm_up_time(Duration::from_millis(200))
+}
- for &size in &sizes {
+// Modified benchmark functions to accept a slice of sizes
+fn bool_matrix_operations_benchmark(c: &mut Criterion, sizes: &[usize]) {
+ for &size in sizes {
let data1: Vec<bool> = (0..size * size).map(|x| x % 2 == 0).collect();
let data2: Vec<bool> = (0..size * size).map(|x| x % 3 == 0).collect();
let bm1 = BoolMatrix::from_vec(data1.clone(), size, size);
@@ -60,10 +70,8 @@ fn bool_matrix_operations_benchmark(c: &mut Criterion) {
}
}
-fn matrix_boolean_operations_benchmark(c: &mut Criterion) {
- let sizes = BENCH_SIZES;
-
- for &size in &sizes {
+fn matrix_boolean_operations_benchmark(c: &mut Criterion, sizes: &[usize]) {
+ for &size in sizes {
let data1: Vec<bool> = (0..size * size).map(|x| x % 2 == 0).collect();
let data2: Vec<bool> = (0..size * size).map(|x| x % 3 == 0).collect();
let bm1 = BoolMatrix::from_vec(data1.clone(), size, size);
@@ -95,10 +103,8 @@ fn matrix_boolean_operations_benchmark(c: &mut Criterion) {
}
}
-fn matrix_operations_benchmark(c: &mut Criterion) {
- let sizes = BENCH_SIZES;
-
- for &size in &sizes {
+fn matrix_operations_benchmark(c: &mut Criterion, sizes: &[usize]) {
+ for &size in sizes {
let data: Vec<f64> = (0..size * size).map(|x| x as f64).collect();
let ma = Matrix::from_vec(data.clone(), size, size);
@@ -127,8 +133,7 @@ fn matrix_operations_benchmark(c: &mut Criterion) {
});
}
- // Benchmarking matrix addition
- for &size in &sizes {
+ for &size in sizes {
let data1: Vec<f64> = (0..size * size).map(|x| x as f64).collect();
let data2: Vec<f64> = (0..size * size).map(|x| (x + 1) as f64).collect();
let ma = Matrix::from_vec(data1.clone(), size, size);
@@ -167,10 +172,7 @@ fn generate_frame(size: usize) -> Frame {
.unwrap()
.list()
.unwrap();
-
- // let col_names= str(i) for i in range(1, 1000)
let col_names: Vec<String> = (1..=size).map(|i| format!("col_{}", i)).collect();
-
Frame::new(
Matrix::from_vec(data.clone(), size, size),
col_names,
@@ -178,10 +180,8 @@ fn generate_frame(size: usize) -> Frame {
)
}
-fn benchmark_frame_operations(c: &mut Criterion) {
- let sizes = BENCH_SIZES;
-
- for &size in &sizes {
+fn benchmark_frame_operations(c: &mut Criterion, sizes: &[usize]) {
+ for &size in sizes {
let fa = generate_frame(size);
let fb = generate_frame(size);
@@ -232,13 +232,46 @@ fn benchmark_frame_operations(c: &mut Criterion) {
}
}
-// Define the criterion group and pass the custom configuration function
+// Runner functions for each size category
+fn run_benchmarks_small(c: &mut Criterion) {
+ bool_matrix_operations_benchmark(c, &SIZES_SMALL);
+ matrix_boolean_operations_benchmark(c, &SIZES_SMALL);
+ matrix_operations_benchmark(c, &SIZES_SMALL);
+ benchmark_frame_operations(c, &SIZES_SMALL);
+}
+
+fn run_benchmarks_medium(c: &mut Criterion) {
+ bool_matrix_operations_benchmark(c, &SIZES_MEDIUM);
+ matrix_boolean_operations_benchmark(c, &SIZES_MEDIUM);
+ matrix_operations_benchmark(c, &SIZES_MEDIUM);
+ benchmark_frame_operations(c, &SIZES_MEDIUM);
+}
+
+fn run_benchmarks_large(c: &mut Criterion) {
+ bool_matrix_operations_benchmark(c, &SIZES_LARGE);
+ matrix_boolean_operations_benchmark(c, &SIZES_LARGE);
+ matrix_operations_benchmark(c, &SIZES_LARGE);
+ benchmark_frame_operations(c, &SIZES_LARGE);
+}
+
criterion_group!(
- name = combined_benches;
- config = for_short_runs(); // Use the custom configuration here
- targets = bool_matrix_operations_benchmark,
- matrix_boolean_operations_benchmark,
- matrix_operations_benchmark,
- benchmark_frame_operations
+ name = benches_small_arrays;
+ config = config_small_arrays();
+ targets = run_benchmarks_small
+);
+criterion_group!(
+ name = benches_medium_arrays;
+ config = config_medium_arrays();
+ targets = run_benchmarks_medium
+);
+criterion_group!(
+ name = benches_large_arrays;
+ config = config_large_arrays();
+ targets = run_benchmarks_large
+);
+
+criterion_main!(
+ benches_small_arrays,
+ benches_medium_arrays,
+ benches_large_arrays
);
-criterion_main!(combined_benches);
From dfe259a3710fe61e34e180b2149849175b76433f Mon Sep 17 00:00:00 2001
From: Palash Tyagi <23239946+Magnus167@users.noreply.github.com>
Date: Wed, 7 May 2025 20:20:42 +0100
Subject: [PATCH 22/28] move benchmark config
---
benches/benchmarks.rs | 44 +++++++++++++++++++++----------------------
1 file changed, 22 insertions(+), 22 deletions(-)
diff --git a/benches/benchmarks.rs b/benches/benchmarks.rs
index bd17531..6b94912 100644
--- a/benches/benchmarks.rs
+++ b/benches/benchmarks.rs
@@ -14,28 +14,6 @@ const SIZES_SMALL: [usize; 1] = [1];
const SIZES_MEDIUM: [usize; 3] = [100, 250, 500];
const SIZES_LARGE: [usize; 1] = [1000];
-// Configuration functions for different size categories
-fn config_small_arrays() -> Criterion {
- Criterion::default()
- .sample_size(500) // More samples for very fast operations
- .measurement_time(Duration::from_millis(500))
- .warm_up_time(Duration::from_millis(50))
-}
-
-fn config_medium_arrays() -> Criterion {
- Criterion::default()
- .sample_size(100)
- .measurement_time(Duration::from_millis(2000))
- .warm_up_time(Duration::from_millis(100))
-}
-
-fn config_large_arrays() -> Criterion {
- Criterion::default()
- .sample_size(50)
- .measurement_time(Duration::from_millis(5000))
- .warm_up_time(Duration::from_millis(200))
-}
-
// Modified benchmark functions to accept a slice of sizes
fn bool_matrix_operations_benchmark(c: &mut Criterion, sizes: &[usize]) {
for &size in sizes {
@@ -254,6 +232,28 @@ fn run_benchmarks_large(c: &mut Criterion) {
benchmark_frame_operations(c, &SIZES_LARGE);
}
+// Configuration functions for different size categories
+fn config_small_arrays() -> Criterion {
+ Criterion::default()
+ .sample_size(500)
+ .measurement_time(Duration::from_millis(500))
+ .warm_up_time(Duration::from_millis(50))
+}
+
+fn config_medium_arrays() -> Criterion {
+ Criterion::default()
+ .sample_size(100)
+ .measurement_time(Duration::from_millis(2000))
+ .warm_up_time(Duration::from_millis(100))
+}
+
+fn config_large_arrays() -> Criterion {
+ Criterion::default()
+ .sample_size(50)
+ .measurement_time(Duration::from_millis(5000))
+ .warm_up_time(Duration::from_millis(200))
+}
+
criterion_group!(
name = benches_small_arrays;
config = config_small_arrays();
From 2e980a78fad0e2401e5a4e42d0e76d298a8998d2 Mon Sep 17 00:00:00 2001
From: Palash Tyagi <23239946+Magnus167@users.noreply.github.com>
Date: Sun, 11 May 2025 00:46:46 +0100
Subject: [PATCH 23/28] Update documentation for Matrix struct to clarify
indexing method
---
src/matrix/mat.rs | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/matrix/mat.rs b/src/matrix/mat.rs
index 180650a..18eb804 100644
--- a/src/matrix/mat.rs
+++ b/src/matrix/mat.rs
@@ -2,7 +2,7 @@
use std::ops::{Add, BitAnd, BitOr, BitXor, Div, Index, IndexMut, Mul, Not, Sub};
-/// A column-major 2D matrix of `T`
+/// A column-major 2D matrix of `T`. Index as `Array(row, column)`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Matrix<T> {
rows: usize,
From 1a5b8919d3f4a1e3a4c1ec15572a88a7dbc02966 Mon Sep 17 00:00:00 2001
From: Palash Tyagi <23239946+Magnus167@users.noreply.github.com>
Date: Sun, 11 May 2025 00:51:24 +0100
Subject: [PATCH 24/28] Adjust benchmark configuration for small arrays to
optimize measurement and warm-up times
---
benches/benchmarks.rs | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/benches/benchmarks.rs b/benches/benchmarks.rs
index 6b94912..bb74a64 100644
--- a/benches/benchmarks.rs
+++ b/benches/benchmarks.rs
@@ -236,8 +236,8 @@ fn run_benchmarks_large(c: &mut Criterion) {
fn config_small_arrays() -> Criterion {
Criterion::default()
.sample_size(500)
- .measurement_time(Duration::from_millis(500))
- .warm_up_time(Duration::from_millis(50))
+ .measurement_time(Duration::from_millis(100))
+ .warm_up_time(Duration::from_millis(5))
}
fn config_medium_arrays() -> Criterion {
@@ -254,6 +254,7 @@ fn config_large_arrays() -> Criterion {
.warm_up_time(Duration::from_millis(200))
}
+
criterion_group!(
name = benches_small_arrays;
config = config_small_arrays();
From 6a9b828adad3a7d63d11bef9fd26097c1e4702e4 Mon Sep 17 00:00:00 2001
From: Palash Tyagi <23239946+Magnus167@users.noreply.github.com>
Date: Sun, 11 May 2025 00:51:35 +0100
Subject: [PATCH 25/28] Sort benchmark sizes numerically in HTML report
generation
---
.github/scripts/custom_benchmark_report.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/.github/scripts/custom_benchmark_report.py b/.github/scripts/custom_benchmark_report.py
index 90c2562..f255d98 100644
--- a/.github/scripts/custom_benchmark_report.py
+++ b/.github/scripts/custom_benchmark_report.py
@@ -247,7 +247,8 @@ def generate_html_table_with_links(
{html_doc_end}"""
all_sizes = sorted(
- list(set(size for test_data in results.values() for size in test_data.keys()))
+ list(set(size for test_data in results.values() for size in test_data.keys())),
+ key=(lambda x: int(x.split("x")[0])),
)
all_test_names = sorted(list(results.keys()))
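Sorting the size labels with a numeric key matters because they are strings: a plain lexicographic sort would place "1000x1000" ahead of "100x100" in the header row. A quick illustration, assuming the NxN label format implied by the split on "x" (the concrete labels are illustrative, matching the benchmark sizes 1, 100, 250, 500, and 1000):

    sizes = ["100x100", "1x1", "500x500", "1000x1000", "250x250"]

    # Lexicographic order compares character by character:
    print(sorted(sizes))
    # ['1000x1000', '100x100', '1x1', '250x250', '500x500']

    # Keying on the number before the 'x' restores the intended column order:
    print(sorted(sizes, key=lambda s: int(s.split("x")[0])))
    # ['1x1', '100x100', '250x250', '500x500', '1000x1000']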
From 9702b6d5c456ed7d50e7059547f467199b503696 Mon Sep 17 00:00:00 2001
From: Palash Tyagi <23239946+Magnus167@users.noreply.github.com>
Date: Sun, 11 May 2025 00:55:29 +0100
Subject: [PATCH 26/28] Add custom benchmark report generation step to workflow
---
.github/workflows/run-benchmarks.yml | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/.github/workflows/run-benchmarks.yml b/.github/workflows/run-benchmarks.yml
index baea81a..bd73ca8 100644
--- a/.github/workflows/run-benchmarks.yml
+++ b/.github/workflows/run-benchmarks.yml
@@ -36,6 +36,16 @@ jobs:
- name: Run benchmarks
run: cargo bench --features bench
+ - name: Generate custom benchmark reports
+ run: |
+ if [ -d ./target/criterion ]; then
+ echo "Found benchmark reports, generating custom report..."
+ else
+ echo "No benchmark reports found, skipping custom report generation."
+ exit 1
+ fi
+ python .github/scripts/custom_benchmark_report.py
+
- name: Upload benchmark reports
uses: actions/upload-artifact@v4
with:
From 876f1ccbf37dc7996cb0f3bc88d6c7c40825abbd Mon Sep 17 00:00:00 2001
From: Palash Tyagi <23239946+Magnus167@users.noreply.github.com>
Date: Sun, 11 May 2025 01:06:27 +0100
Subject: [PATCH 27/28] Fix Python command to use python3 for custom benchmark
report generation
---
.github/workflows/run-benchmarks.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/run-benchmarks.yml b/.github/workflows/run-benchmarks.yml
index bd73ca8..d0b9a65 100644
--- a/.github/workflows/run-benchmarks.yml
+++ b/.github/workflows/run-benchmarks.yml
@@ -44,7 +44,7 @@ jobs:
echo "No benchmark reports found, skipping custom report generation."
exit 1
fi
- python .github/scripts/custom_benchmark_report.py
+ python3 .github/scripts/custom_benchmark_report.py
- name: Upload benchmark reports
uses: actions/upload-artifact@v4
From 643c897479f3721b94764fcbba3ac29a04697381 Mon Sep 17 00:00:00 2001
From: Palash Tyagi <23239946+Magnus167@users.noreply.github.com>
Date: Sun, 11 May 2025 01:25:27 +0100
Subject: [PATCH 28/28] Install pandas before generating custom benchmark
reports
---
.github/workflows/run-benchmarks.yml | 1 +
1 file changed, 1 insertion(+)
diff --git a/.github/workflows/run-benchmarks.yml b/.github/workflows/run-benchmarks.yml
index d0b9a65..27e5408 100644
--- a/.github/workflows/run-benchmarks.yml
+++ b/.github/workflows/run-benchmarks.yml
@@ -44,6 +44,7 @@ jobs:
echo "No benchmark reports found, skipping custom report generation."
exit 1
fi
+ pip3 install pandas
python3 .github/scripts/custom_benchmark_report.py
- name: Upload benchmark reports