diff --git a/.github/runners/runner-x64/Dockerfile b/.github/runners/runner-x64/Dockerfile
index 79ee4e3..de3cb24 100644
--- a/.github/runners/runner-x64/Dockerfile
+++ b/.github/runners/runner-x64/Dockerfile
@@ -7,7 +7,7 @@ ARG DEBIAN_FRONTEND=noninteractive
RUN apt update -y && apt upgrade -y && useradd -m docker
RUN apt install -y --no-install-recommends \
- curl jq git \
+ curl jq git unzip \
# dev dependencies
build-essential libssl-dev libffi-dev python3 python3-venv python3-dev python3-pip \
# dot net core dependencies
@@ -15,6 +15,12 @@ RUN apt install -y --no-install-recommends \
# Rust and Cargo dependencies
gcc cmake
+# Install GitHub CLI
+RUN curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg | dd of=/usr/share/keyrings/githubcli-archive-keyring.gpg \
+ && chmod go+r /usr/share/keyrings/githubcli-archive-keyring.gpg \
+ && echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | tee /etc/apt/sources.list.d/github-cli.list > /dev/null \
+ && apt update -y && apt install -y gh \
+ && rm -rf /var/lib/apt/lists/*
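+# Sanity check (illustrative addition, not strictly required): fail the image
+# build early if the GitHub CLI did not install correctly
+RUN gh --version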
# Install Rust and Cargo
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
diff --git a/.github/scripts/custom_benchmark_report.py b/.github/scripts/custom_benchmark_report.py
new file mode 100644
index 0000000..0e41d87
--- /dev/null
+++ b/.github/scripts/custom_benchmark_report.py
@@ -0,0 +1,301 @@
+# custom_benchmark_report.py
+
+import argparse
+import json
+import re
+import sys
+from pathlib import Path
+from pprint import pprint
+from collections import defaultdict
+from typing import Dict, Any, Optional
+
+import pandas as pd
+import html # Import the html module for escaping
+
+
+# Regular expression to parse "test_name (size)" format
+DIR_PATTERN = re.compile(r"^(.*?) \((.*?)\)$")
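+# e.g. a directory named "dot_product (1000)" parses to
+# group(1)="dot_product", group(2)="1000" (hypothetical benchmark name)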
+
+# Standard location for criterion estimates relative to the benchmark dir
+ESTIMATES_PATH_NEW = Path("new") / "estimates.json"
+# Fallback location (older versions or baseline comparisons)
+ESTIMATES_PATH_BASE = Path("base") / "estimates.json"
+
+# Standard location for the HTML report relative to the benchmark's specific directory
+REPORT_HTML_RELATIVE_PATH = Path("report") / "index.html"
+
+
+def load_criterion_reports(criterion_root_dir: Path) -> Dict[str, Dict[str, Dict[str, Any]]]:
+ """
+ Loads Criterion benchmark results from a specified directory and finds HTML paths.
+
+ Args:
+ criterion_root_dir: The Path object pointing to the main
+ 'target/criterion' directory.
+
+ Returns:
+ A nested dictionary structured as:
+ { test_name: { size: {'json': json_content, 'html_path': relative_html_path}, ... }, ... }
+ Returns an empty dict if the root directory is not found or empty.
+ """
+ results: Dict[str, Dict[str, Dict[str, Any]]] = defaultdict(dict)
+
+ if not criterion_root_dir.is_dir():
+ print(
+ f"Error: Criterion root directory not found or is not a directory: {criterion_root_dir}",
+ file=sys.stderr,
+ )
+ return {}
+
+ print(f"Scanning for benchmark reports in: {criterion_root_dir}")
+
+ for item in criterion_root_dir.iterdir():
+ # We are only interested in directories matching the pattern
+ if not item.is_dir():
+ continue
+
+ match = DIR_PATTERN.match(item.name)
+ if not match:
+ # print(f"Skipping directory (name doesn't match pattern): {item.name}")
+ continue
+
+ test_name = match.group(1).strip()
+ size = match.group(2).strip()
+ benchmark_dir_name = item.name # Store the original directory name
+ benchmark_dir_path = item # The Path object to the benchmark dir
+
+ json_path: Optional[Path] = None
+
+ # Look for the estimates JSON file (prefer 'new', fallback to 'base')
+ if (benchmark_dir_path / ESTIMATES_PATH_NEW).is_file():
+ json_path = benchmark_dir_path / ESTIMATES_PATH_NEW
+ elif (benchmark_dir_path / ESTIMATES_PATH_BASE).is_file():
+ json_path = benchmark_dir_path / ESTIMATES_PATH_BASE
+
+ # The HTML report is at a fixed location relative to the benchmark directory
+ html_path = benchmark_dir_path / REPORT_HTML_RELATIVE_PATH
+
+
+ if json_path is None or not json_path.is_file():
+ print(
+ f"Warning: Could not find estimates JSON in {benchmark_dir_path}. Skipping benchmark size '{test_name} ({size})'.",
+ file=sys.stderr,
+ )
+ continue # Skip if no JSON data
+
+ if not html_path.is_file():
+ print(
+ f"Warning: Could not find HTML report at expected location {html_path}. Skipping benchmark size '{test_name} ({size})'.",
+ file=sys.stderr,
+ )
+ continue # Skip if no HTML report
+
+ # Try loading the JSON data
+ try:
+ with json_path.open("r", encoding="utf-8") as f:
+ json_data = json.load(f)
+
+ # Store both the JSON data and the relative path to the HTML report
+ results[test_name][size] = {
+ 'json': json_data,
+ # The path from the criterion root to the specific benchmark's report/index.html
+ 'html_path_relative_to_criterion_root': str(Path(benchmark_dir_name) / REPORT_HTML_RELATIVE_PATH).replace('\\', '/') # Ensure forward slashes
+ }
+ # print(f" Loaded: {test_name} ({size}) from {json_path}, html: {html_path}")
+ except json.JSONDecodeError:
+ print(f"Error: Failed to decode JSON from {json_path}", file=sys.stderr)
+ except IOError as e:
+ print(f"Error: Failed to read file {json_path}: {e}", file=sys.stderr)
+ except Exception as e:
+ print(
+ f"Error: An unexpected error occurred loading {json_path}: {e}",
+ file=sys.stderr,
+ )
+
+ # Convert defaultdict back to regular dict for cleaner output (optional)
+ return dict(results)
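+
+# For illustration, given a layout like this (hypothetical benchmark "matmul (64x64)"):
+#   target/criterion/matmul (64x64)/new/estimates.json
+#   target/criterion/matmul (64x64)/report/index.html
+# this function returns:
+#   {"matmul": {"64x64": {"json": {...},
+#       "html_path_relative_to_criterion_root": "matmul (64x64)/report/index.html"}}}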
+
+
+def format_nanoseconds(ns: float) -> str:
+ """Formats nanoseconds into a human-readable string with units."""
+ if pd.isna(ns):
+ return "-"
+ if ns < 1_000:
+ return f"{ns:.2f} ns"
+ elif ns < 1_000_000:
+ return f"{ns / 1_000:.2f} ยตs"
+ elif ns < 1_000_000_000:
+ return f"{ns / 1_000_000:.2f} ms"
+ else:
+ return f"{ns / 1_000_000_000:.2f} s"
+
+
+def generate_html_table_with_links(results: Dict[str, Dict[str, Dict[str, Any]]], html_base_path: str) -> str:
+ """
+ Generates an HTML table from benchmark results, with cells linking to reports.
+
+ Args:
+ results: The nested dictionary loaded by load_criterion_reports,
+ including 'json' data and 'html_path_relative_to_criterion_root'.
+ html_base_path: The base URL path where the 'target/criterion' directory
+ is hosted on the static site, relative to the output HTML file.
+ e.g., '../target/criterion/'
+
+ Returns:
+ A string containing the full HTML table.
+ """
+ if not results:
+ return "
No benchmark results found or loaded.
"
+
+    # Collect all unique sizes (columns) and test names (rows),
+    # sorted so rows and columns have a deterministic order.
+ all_sizes = sorted(list(set(size for test_data in results.values() for size in test_data.keys())))
+ all_test_names = sorted(list(results.keys()))
+
+ html_string = """
+
+
Criterion Benchmark Results
+
Each cell links to the detailed Criterion report for that specific benchmark size.
+
Note: Values shown are the midpoint of the mean confidence interval, formatted for readability.
+
+
+
+
Benchmark Name
+ """
+
+ # Add size headers
+ for size in all_sizes:
+ html_string += f"
{html.escape(size)}
\n"
+
+ html_string += """
+
+
+
+ """
+
+ # Add data rows
+ for test_name in all_test_names:
+ html_string += f"
\n"
+ html_string += f"
{html.escape(test_name)}
\n"
+
+ # Iterate through all possible sizes to ensure columns align
+ for size in all_sizes:
+ cell_data = results.get(test_name, {}).get(size)
+ mean_value = pd.NA # Default value
+ full_report_url = "#" # Default link to self or dummy
+
+ if cell_data and 'json' in cell_data and 'html_path_relative_to_criterion_root' in cell_data:
+ try:
+ # Extract mean from JSON
+ mean_data = cell_data['json'].get("mean")
+ if mean_data and "confidence_interval" in mean_data:
+ ci = mean_data["confidence_interval"]
+ if "lower_bound" in ci and "upper_bound" in ci:
+ lower, upper = ci["lower_bound"], ci["upper_bound"]
+ if isinstance(lower, (int, float)) and isinstance(upper, (int, float)):
+ mean_value = (lower + upper) / 2.0
+ else:
+ print(f"Warning: Non-numeric bounds for {test_name} ({size}).", file=sys.stderr)
+ else:
+ print(f"Warning: Missing confidence_interval bounds for {test_name} ({size}).", file=sys.stderr)
+ else:
+ print(f"Warning: Missing 'mean' data for {test_name} ({size}).", file=sys.stderr)
+
+ # Construct the full relative URL
+ relative_report_path = cell_data['html_path_relative_to_criterion_root']
+ full_report_url = f"{html_base_path}{relative_report_path}"
+                    # Normalize to forward slashes and collapse any doubled slashes (e.g. if html_base_path ends in "/")
+ full_report_url = str(Path(full_report_url)).replace('\\', '/')
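+                    # e.g. "../target/criterion/" + "matmul (64x64)/report/index.html"
+                    #   -> "../target/criterion/matmul (64x64)/report/index.html" (hypothetical)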
+
+
+ except Exception as e:
+ print(f"Error processing cell data for {test_name} ({size}): {e}", file=sys.stderr)
+ # Keep mean_value as NA and URL as '#'
+
+ # Format the mean value for display
+ formatted_mean = format_nanoseconds(mean_value)
+
+ # Create the link cell
+ # Only make it a link if a valid report path was found
+ if full_report_url and full_report_url != "#":
+                html_string += f'<td><a href="{html.escape(full_report_url)}">{html.escape(formatted_mean)}</a></td>\n'
+            else:
+                # Display value without a link if no report path
+                html_string += f'<td>{html.escape(formatted_mean)}</td>\n'
+
+
+ html_string += f"
\n"
+
+ html_string += """
+
+
+ """
+
+ return html_string
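+
+# For reference, a single generated data cell looks roughly like (hypothetical values):
+#   <td><a href="../target/criterion/matmul (64x64)/report/index.html">1.50 µs</a></td>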
+
+
+if __name__ == "__main__":
+ DEFAULT_CRITERION_PATH = "target/criterion"
+ # Default relative path from benchmark_results.html to the criterion root on the hosted site
+ # Assumes benchmark_results.html is in .../doc//benchmarks/
+ # And target/criterion is copied to .../doc//target/criterion/
+ # So the path from benchmarks/ to target/criterion/ is ../target/criterion/
+ DEFAULT_HTML_BASE_PATH = "../target/criterion/"
+
+ parser = argparse.ArgumentParser(
+ description="Load Criterion benchmark results from JSON files and generate an HTML table with links to reports."
+ )
+ parser.add_argument(
+ "--criterion-dir",
+ type=str,
+ default=DEFAULT_CRITERION_PATH,
+ help=f"Path to the main 'target/criterion' directory (default: {DEFAULT_CRITERION_PATH}) on the runner.",
+ )
+ parser.add_argument(
+ "--html-base-path",
+ type=str,
+ default=DEFAULT_HTML_BASE_PATH,
+ help=f"Relative URL path from the output HTML file to the hosted 'target/criterion' directory (default: {DEFAULT_HTML_BASE_PATH}).",
+ )
+ parser.add_argument(
+ "--output-file",
+ type=str,
+ default="benchmark_results.html",
+ help="Name of the output HTML file (default: benchmark_results.html)."
+ )
+
+
+ args = parser.parse_args()
+
+ criterion_path = Path(args.criterion_dir)
+ all_results = load_criterion_reports(criterion_path)
+
+ if not all_results:
+ print("\nNo benchmark results found or loaded.")
+ # Still create an empty file or a file with an error message
+ try:
+ with open(args.output_file, "w", encoding="utf-8") as f:
+ f.write("
Criterion Benchmark Results
No benchmark results found or loaded.
")
+ print(f"Created empty/error HTML file: {args.output_file}")
+ except IOError as e:
+ print(f"Error creating empty/error HTML file {args.output_file}: {e}", file=sys.stderr)
+ sys.exit(1) # Indicate failure if no data was loaded successfully
+
+ print("\nSuccessfully loaded benchmark results.")
+ # pprint(all_results) # Uncomment for debugging
+
+ print(f"Generating HTML table with links using base path: {args.html_base_path}")
+ html_output = generate_html_table_with_links(all_results, args.html_base_path)
+
+ try:
+ with open(args.output_file, "w", encoding="utf-8") as f:
+ f.write(html_output)
+ print(f"\nSuccessfully wrote HTML table to {args.output_file}")
+ sys.exit(0) # Exit successfully
+ except IOError as e:
+ print(f"Error writing HTML output to {args.output_file}: {e}", file=sys.stderr)
+ sys.exit(1)
+ except Exception as e:
+ print(f"An unexpected error occurred while writing HTML: {e}", file=sys.stderr)
+ sys.exit(1)
\ No newline at end of file
diff --git a/.github/workflows/docs-and-testcov.yml b/.github/workflows/docs-and-testcov.yml
index 8ace0a8..c845a1a 100644
--- a/.github/workflows/docs-and-testcov.yml
+++ b/.github/workflows/docs-and-testcov.yml
@@ -6,10 +6,14 @@ concurrency:
on:
push:
- branches: [main]
+ branches: [main, docs_page]
# pull_request:
# branches: [main]
workflow_dispatch:
+ workflow_run:
+ workflows: ["run-benchmarks"]
+ types:
+ - completed
permissions:
contents: read
@@ -100,6 +104,36 @@ jobs:
<(echo '{}') \
> last-commit-date.json
+ - name: Download last available benchmark report
+ run: |
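+          # NOTE: assumes `gh` can authenticate here, e.g. via a GH_TOKEN env var supplied to this step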
+ artifact_url=$(gh api -H "Accept: application/vnd.github+json" \
+ /repos/${{ github.repository }}/actions/artifacts \
+ | jq -r '.artifacts[] | select(.name | startswith("benchmark-reports")) | .archive_download_url' | head -n 1)
+
+ if [ -z "$artifact_url" ]; then
+ echo "No benchmark artifact found!"
+ exit 1
+ fi
+
+ curl -L -H "Authorization: Bearer ${{ secrets.CUSTOM_GH_TOKEN }}" \
+ "$artifact_url" -o benchmark-report.zip
+
+ # Print all files in the current directory
+ echo "Files in the current directory:"
+ ls -al
+
+ # check if the zip file is valid
+ if ! unzip -tq benchmark-report.zip; then
+ echo "benchmark-report.zip is invalid or corrupted!"
+ exit 1
+ fi
+
+ unzip -q benchmark-report.zip -d benchmark-report
+
+ echo "" > benchmark-report/index.html
+
- name: Copy files to output directory
run: |
# mkdir docs
@@ -113,16 +147,16 @@ jobs:
cp last-commit-date.json target/doc/docs/
# cp -r .github target/doc/docs
cp .github/rustframe_logo.png target/doc/docs/
- echo "" > target/doc/index.html
+ # echo "" > target/doc/index.html
touch target/doc/.nojekyll
- # verify that logo exists in the output directory
- - name: Verify logo directory
+ # copy the benchmark report to the output directory
+ cp -r benchmark-report target/doc/
+
+ - name: Add index.html to output directory
run: |
- if [ ! -f target/doc/docs/rustframe_logo.png ]; then
- echo "Logo not found in output directory!"
- exit 1
- fi
+ cp .github/htmldocs/index.html target/doc/index.html
+ cp .github/rustframe_logo.png target/doc/rustframe_logo.png
- name: Upload Pages artifact
# if: github.event_name == 'push' || github.event_name == 'workflow_dispatch'
diff --git a/.github/workflows/run-benchmarks.yml b/.github/workflows/run-benchmarks.yml
index 50ec98c..baea81a 100644
--- a/.github/workflows/run-benchmarks.yml
+++ b/.github/workflows/run-benchmarks.yml
@@ -1,4 +1,4 @@
-name: Run benchmarks
+name: run-benchmarks
on:
workflow_dispatch:
diff --git a/.gitignore b/.gitignore
index 665001e..ac6b27b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,4 +14,6 @@ data/
.venv/
.vscode/
-tarpaulin-report.*
\ No newline at end of file
+tarpaulin-report.*
+
+.github/htmldocs/rustframe_logo.png
\ No newline at end of file