# create_benchmark_table.py
import argparse
import json
import re
import sys
from pathlib import Path
from pprint import pprint
from collections import defaultdict
from typing import Dict, Any, Optional

import pandas as pd
import html  # Import the html module for escaping

# Regular expression to parse "test_name (size)" format
DIR_PATTERN = re.compile(r"^(.*?) \((.*?)\)$")
# Standard location for criterion estimates relative to the benchmark dir
ESTIMATES_PATH_NEW = Path("new") / "estimates.json"
# Fallback location (older versions or baseline comparisons)
ESTIMATES_PATH_BASE = Path("base") / "estimates.json"
# Standard location for the HTML report relative to the benchmark's specific directory
REPORT_HTML_RELATIVE_PATH = Path("report") / "index.html"


def get_default_criterion_report_path() -> Path:
    """
    Returns the default path for the top-level Criterion benchmark report,
    typically 'target/criterion/report/index.html'.
    """
    return Path("target") / "criterion" / "report" / "index.html"


def load_criterion_reports(
    criterion_root_dir: Path,
) -> Dict[str, Dict[str, Dict[str, Any]]]:
    """
    Loads Criterion benchmark results from a specified directory and finds HTML report paths.

    Args:
        criterion_root_dir: The Path object pointing to the main 'target/criterion' directory.

    Returns:
        A nested dictionary structured as:
        {
            test_name: {
                size: {
                    'json': json_content,
                    'html_path_relative_to_criterion_root': relative_html_path,
                },
                ...
            },
            ...
        }
        Returns an empty dict if the root directory is not found or contains no reports.
    """
    results: Dict[str, Dict[str, Dict[str, Any]]] = defaultdict(dict)
    if not criterion_root_dir.is_dir():
        print(
            f"Error: Criterion root directory not found or is not a directory: {criterion_root_dir}",
            file=sys.stderr,
        )
        return {}

    print(f"Scanning for benchmark reports in: {criterion_root_dir}")
    for item in criterion_root_dir.iterdir():
        if not item.is_dir():
            continue
        match = DIR_PATTERN.match(item.name)
        if not match:
            continue

        test_name = match.group(1).strip()
        size = match.group(2).strip()
        benchmark_dir_name = item.name
        benchmark_dir_path = item

        # Prefer the 'new' estimates; fall back to 'base' if they are absent.
        json_path: Optional[Path] = None
        if (benchmark_dir_path / ESTIMATES_PATH_NEW).is_file():
            json_path = benchmark_dir_path / ESTIMATES_PATH_NEW
        elif (benchmark_dir_path / ESTIMATES_PATH_BASE).is_file():
            json_path = benchmark_dir_path / ESTIMATES_PATH_BASE

        html_path = benchmark_dir_path / REPORT_HTML_RELATIVE_PATH

        if json_path is None or not json_path.is_file():
            print(
                f"Warning: Could not find estimates JSON in {benchmark_dir_path}. "
                f"Skipping benchmark size '{test_name} ({size})'.",
                file=sys.stderr,
            )
            continue
        if not html_path.is_file():
            print(
                f"Warning: Could not find HTML report at expected location {html_path}. "
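# Illustrative example (the benchmark name below is hypothetical): for a layout such as
#   target/criterion/encode (64x64)/new/estimates.json
#   target/criterion/encode (64x64)/report/index.html
# load_criterion_reports(Path("target") / "criterion") yields approximately
#   {"encode": {"64x64": {"json": {...},
#                         "html_path_relative_to_criterion_root": "encode (64x64)/report/index.html"}}}
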
                f"Skipping benchmark size '{test_name} ({size})'.",
                file=sys.stderr,
            )
            continue

        try:
            with json_path.open("r", encoding="utf-8") as f:
                json_data = json.load(f)
            results[test_name][size] = {
                "json": json_data,
                "html_path_relative_to_criterion_root": str(
                    Path(benchmark_dir_name) / REPORT_HTML_RELATIVE_PATH
                ).replace("\\", "/"),
            }
        except json.JSONDecodeError:
            print(f"Error: Failed to decode JSON from {json_path}", file=sys.stderr)
        except IOError as e:
            print(f"Error: Failed to read file {json_path}: {e}", file=sys.stderr)
        except Exception as e:
            print(
                f"Error: An unexpected error occurred loading {json_path}: {e}",
                file=sys.stderr,
            )

    return dict(results)


def format_nanoseconds(ns: float) -> str:
    """Formats a duration in nanoseconds into a human-readable string with units."""
    if pd.isna(ns):
        return "-"
    if ns < 1_000:
        return f"{ns:.2f} ns"
    elif ns < 1_000_000:
        return f"{ns / 1_000:.2f} µs"
    elif ns < 1_000_000_000:
        return f"{ns / 1_000_000:.2f} ms"
    else:
        return f"{ns / 1_000_000_000:.2f} s"
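# For example: format_nanoseconds(950.0) returns "950.00 ns",
# format_nanoseconds(1_500.0) returns "1.50 µs", and
# format_nanoseconds(2_500_000.0) returns "2.50 ms".
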
def generate_html_table_with_links(
    results: Dict[str, Dict[str, Dict[str, Any]]], html_base_path: str
) -> str:
    """
    Generates a full HTML page with a styled table from benchmark results.

    Args:
        results: Output of load_criterion_reports().
        html_base_path: Prefix prepended to each per-benchmark report path when
            building the cell links.
    """
    # Minimal page scaffolding; the CSS and the surrounding markup are simple
    # placeholders around the generated table.
    css_styles = """ """
    html_doc_start = f"""<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Benchmark Results</title>
<style>
{css_styles}
</style>
</head>
<body>
"""
    html_doc_end = """
</body>
</html>
"""

    if not results:
        return f"""{html_doc_start}
<p>No benchmark results found or loaded.</p>
{html_doc_end}"""
{html_doc_end}""" all_sizes = sorted( list(set(size for test_data in results.values() for size in test_data.keys())), key=(lambda x: int(x.split("x")[0])), ) all_test_names = sorted(list(results.keys())) table_content = """Each cell links to the detailed Criterion.rs report for that specific benchmark size.
Note: Values shown are the midpoint of the mean confidence interval, formatted for readability.
[Switch to the standard Criterion.rs report]
Benchmark Name | """ for size in all_sizes: table_content += f"{html.escape(size)} | \n" table_content += """|
---|---|---|
    for test_name in all_test_names:
        table_content += f"    <tr>\n      <td>{html.escape(test_name)}</td>\n"
        for size in all_sizes:
            cell_data = results.get(test_name, {}).get(size)
            mean_value = pd.NA
            full_report_url = "#"

            if (
                cell_data
                and "json" in cell_data
                and "html_path_relative_to_criterion_root" in cell_data
            ):
                try:
                    mean_data = cell_data["json"].get("mean")
                    if mean_data and "confidence_interval" in mean_data:
                        ci = mean_data["confidence_interval"]
                        if "lower_bound" in ci and "upper_bound" in ci:
                            lower, upper = ci["lower_bound"], ci["upper_bound"]
                            if isinstance(lower, (int, float)) and isinstance(
                                upper, (int, float)
                            ):
                                # Report the midpoint of the mean's confidence interval.
                                mean_value = (lower + upper) / 2.0
                            else:
                                print(
                                    f"Warning: Non-numeric bounds for {test_name} ({size}).",
                                    file=sys.stderr,
                                )
                        else:
                            print(
                                f"Warning: Missing confidence_interval bounds for {test_name} ({size}).",
                                file=sys.stderr,
                            )
                    else:
                        print(
                            f"Warning: Missing 'mean' data for {test_name} ({size}).",
                            file=sys.stderr,
                        )

                    relative_report_path = cell_data[
                        "html_path_relative_to_criterion_root"
                    ]
                    joined_path = Path(html_base_path) / relative_report_path
                    full_report_url = str(joined_path).replace("\\", "/")
                except Exception as e:
                    print(
                        f"Error processing cell data for {test_name} ({size}): {e}",
                        file=sys.stderr,
                    )

            formatted_mean = format_nanoseconds(mean_value)
            if full_report_url and full_report_url != "#":
                table_content += f'      <td><a href="{full_report_url}">{html.escape(formatted_mean)}</a></td>\n'
            else:
                table_content += f"      <td>{html.escape(formatted_mean)}</td>\n"

        table_content += "    </tr>\n"
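    # What follows is a minimal sketch of how the table is closed, the page is
    # assembled, and the script is driven via argparse; the flag names and defaults
    # ("--criterion-dir", "--output", "--html-base-path") are illustrative assumptions.
    table_content += """  </tbody>
</table>
"""
    return f"{html_doc_start}{table_content}{html_doc_end}"


def main() -> None:
    parser = argparse.ArgumentParser(
        description="Generate an HTML summary table from Criterion.rs benchmark reports."
    )
    parser.add_argument(
        "--criterion-dir",
        type=Path,
        default=Path("target") / "criterion",
        help="Path to the 'target/criterion' directory.",
    )
    parser.add_argument(
        "--output",
        type=Path,
        default=Path("benchmark_table.html"),
        help="Where to write the generated HTML page.",
    )
    parser.add_argument(
        "--html-base-path",
        default=".",
        help="Prefix used when building links to the individual Criterion reports.",
    )
    args = parser.parse_args()

    results = load_criterion_reports(args.criterion_dir)
    page = generate_html_table_with_links(results, args.html_base_path)
    args.output.write_text(page, encoding="utf-8")
    print(f"Wrote benchmark table to {args.output}")


if __name__ == "__main__":
    main()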