diff --git a/.github/htmldocs/index.html b/.github/htmldocs/index.html
new file mode 100644
index 0000000..b39f850
--- /dev/null
+++ b/.github/htmldocs/index.html
@@ -0,0 +1,74 @@
+
+
+
+
+
+
+ Rustframe
+
+
+
+
+
+
+
+
+ Rustframe
+
+ A lightweight dataframe & math toolkit for Rust
+
+
+ 📚 Docs |
+ 📊 Benchmarks
+
+
+ 🦀 Crates.io |
+ 🔖 docs.rs
+
+ 🐙 GitHub |
+ 🌐 Gitea mirror
+
+
+
+
+
\ No newline at end of file
diff --git a/.github/runners/runner-x64/Dockerfile b/.github/runners/runner-x64/Dockerfile
index 79ee4e3..de3cb24 100644
--- a/.github/runners/runner-x64/Dockerfile
+++ b/.github/runners/runner-x64/Dockerfile
@@ -7,7 +7,7 @@ ARG DEBIAN_FRONTEND=noninteractive
RUN apt update -y && apt upgrade -y && useradd -m docker
RUN apt install -y --no-install-recommends \
- curl jq git \
+ curl jq git unzip \
# dev dependencies
build-essential libssl-dev libffi-dev python3 python3-venv python3-dev python3-pip \
# dot net core dependencies
@@ -15,6 +15,12 @@ RUN apt install -y --no-install-recommends \
# Rust and Cargo dependencies
gcc cmake
+# Install GitHub CLI
+RUN curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg | dd of=/usr/share/keyrings/githubcli-archive-keyring.gpg \
+ && chmod go+r /usr/share/keyrings/githubcli-archive-keyring.gpg \
+ && echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | tee /etc/apt/sources.list.d/github-cli.list > /dev/null \
+ && apt update -y && apt install -y gh \
+ && rm -rf /var/lib/apt/lists/*
# Install Rust and Cargo
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
diff --git a/.github/scripts/custom_benchmark_report.py b/.github/scripts/custom_benchmark_report.py
new file mode 100644
index 0000000..ea3825f
--- /dev/null
+++ b/.github/scripts/custom_benchmark_report.py
@@ -0,0 +1,426 @@
+# custom_benchmark_report.py
+
+import argparse
+import json
+import re
+import sys
+from pathlib import Path
+from pprint import pprint
+from collections import defaultdict
+from typing import Dict, Any, Optional
+
+import pandas as pd
+import html # Import the html module for escaping
+
+
+# Regular expression to parse "test_name (size)" format
+DIR_PATTERN = re.compile(r"^(.*?) \((.*?)\)$")
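+# e.g. the directory "frame add (100x100)" yields test_name "frame add" and size "100x100"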
+
+# Standard location for criterion estimates relative to the benchmark dir
+ESTIMATES_PATH_NEW = Path("new") / "estimates.json"
+# Fallback location (older versions or baseline comparisons)
+ESTIMATES_PATH_BASE = Path("base") / "estimates.json"
+
+# Standard location for the HTML report relative to the benchmark's specific directory
+REPORT_HTML_RELATIVE_PATH = Path("report") / "index.html"
+
+
+def get_default_criterion_report_path() -> Path:
+ """
+ Returns the default path of Criterion's own top-level HTML report,
+ typically 'target/criterion/report/index.html'.
+ """
+ return Path("target") / "criterion" / "report" / "index.html"
+
+
+def load_criterion_reports(
+ criterion_root_dir: Path,
+) -> Dict[str, Dict[str, Dict[str, Any]]]:
+ """
+ Loads Criterion benchmark results from a specified directory and finds HTML paths.
+
+ Args:
+ criterion_root_dir: The Path object pointing to the main
+ 'target/criterion' directory.
+
+ Returns:
+ A nested dictionary structured as:
+ { test_name: { size: {'json': json_content, 'html_path': relative_html_path}, ... }, ... }
+ Returns an empty dict if the root directory is not found or empty.
+ """
+ results: Dict[str, Dict[str, Dict[str, Any]]] = defaultdict(dict)
+
+ if not criterion_root_dir.is_dir():
+ print(
+ f"Error: Criterion root directory not found or is not a directory: {criterion_root_dir}",
+ file=sys.stderr,
+ )
+ return {}
+
+ print(f"Scanning for benchmark reports in: {criterion_root_dir}")
+
+ for item in criterion_root_dir.iterdir():
+ if not item.is_dir():
+ continue
+
+ match = DIR_PATTERN.match(item.name)
+ if not match:
+ continue
+
+ test_name = match.group(1).strip()
+ size = match.group(2).strip()
+ benchmark_dir_name = item.name
+ benchmark_dir_path = item
+
+ json_path: Optional[Path] = None
+
+ if (benchmark_dir_path / ESTIMATES_PATH_NEW).is_file():
+ json_path = benchmark_dir_path / ESTIMATES_PATH_NEW
+ elif (benchmark_dir_path / ESTIMATES_PATH_BASE).is_file():
+ json_path = benchmark_dir_path / ESTIMATES_PATH_BASE
+
+ html_path = benchmark_dir_path / REPORT_HTML_RELATIVE_PATH
+
+ if json_path is None or not json_path.is_file():
+ print(
+ f"Warning: Could not find estimates JSON in {benchmark_dir_path}. Skipping benchmark size '{test_name} ({size})'.",
+ file=sys.stderr,
+ )
+ continue
+
+ if not html_path.is_file():
+ print(
+ f"Warning: Could not find HTML report at expected location {html_path}. Skipping benchmark size '{test_name} ({size})'.",
+ file=sys.stderr,
+ )
+ continue
+
+ try:
+ with json_path.open("r", encoding="utf-8") as f:
+ json_data = json.load(f)
+
+ results[test_name][size] = {
+ "json": json_data,
+ "html_path_relative_to_criterion_root": str(
+ Path(benchmark_dir_name) / REPORT_HTML_RELATIVE_PATH
+ ).replace("\\", "/"),
+ }
+ except json.JSONDecodeError:
+ print(f"Error: Failed to decode JSON from {json_path}", file=sys.stderr)
+ except IOError as e:
+ print(f"Error: Failed to read file {json_path}: {e}", file=sys.stderr)
+ except Exception as e:
+ print(
+ f"Error: An unexpected error occurred loading {json_path}: {e}",
+ file=sys.stderr,
+ )
+
+ return dict(results)
+
+
+def format_nanoseconds(ns: float) -> str:
+ """Formats nanoseconds into a human-readable string with units."""
+ if pd.isna(ns):
+ return "-"
+ if ns < 1_000:
+ return f"{ns:.2f} ns"
+ elif ns < 1_000_000:
+ return f"{ns / 1_000:.2f} µs"
+ elif ns < 1_000_000_000:
+ return f"{ns / 1_000_000:.2f} ms"
+ else:
+ return f"{ns / 1_000_000_000:.2f} s"
+
+
+def generate_html_table_with_links(
+ results: Dict[str, Dict[str, Dict[str, Any]]], html_base_path: str
+) -> str:
+ """
+ Generates a full HTML page with a styled table from benchmark results.
+ """
+ css_styles = """
+
+ """
+
+ html_doc_start = f"""
+
+
+
+
+ Criterion Benchmark Results
+ {css_styles}
+
+
+
+
+ Criterion Benchmark Results
+"""
+
+ html_doc_end = """
+
+
+"""
+
+ if not results:
+ return f"""{html_doc_start}
+ No benchmark results found or loaded.
+{html_doc_end}"""
+
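+ # Benchmark sizes are formatted as "NxN", so sort numerically on the leading dimension.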
+ all_sizes = sorted(
+ list(set(size for test_data in results.values() for size in test_data.keys())),
+ key=(lambda x: int(x.split("x")[0])),
+ )
+ all_test_names = sorted(list(results.keys()))
+
+ table_content = """
+ Each cell links to the detailed Criterion.rs report for that specific benchmark size.
+ Note: Values shown are the midpoint of the mean confidence interval, formatted for readability.
+ [Switch to the standard Criterion.rs report]
+
+
+
+ Benchmark Name |
+ """
+
+ for size in all_sizes:
+ table_content += f"{html.escape(size)} | \n"
+
+ table_content += """
+
+
+
+ """
+
+ for test_name in all_test_names:
+ table_content += f"\n"
+ table_content += f" {html.escape(test_name)} | \n"
+
+ for size in all_sizes:
+ cell_data = results.get(test_name, {}).get(size)
+ mean_value = pd.NA
+ full_report_url = "#"
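+ # Criterion's estimates.json stores times in nanoseconds; the table shows the midpoint of the mean's confidence interval.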
+
+ if (
+ cell_data
+ and "json" in cell_data
+ and "html_path_relative_to_criterion_root" in cell_data
+ ):
+ try:
+ mean_data = cell_data["json"].get("mean")
+ if mean_data and "confidence_interval" in mean_data:
+ ci = mean_data["confidence_interval"]
+ if "lower_bound" in ci and "upper_bound" in ci:
+ lower, upper = ci["lower_bound"], ci["upper_bound"]
+ if isinstance(lower, (int, float)) and isinstance(
+ upper, (int, float)
+ ):
+ mean_value = (lower + upper) / 2.0
+ else:
+ print(
+ f"Warning: Non-numeric bounds for {test_name} ({size}).",
+ file=sys.stderr,
+ )
+ else:
+ print(
+ f"Warning: Missing confidence_interval bounds for {test_name} ({size}).",
+ file=sys.stderr,
+ )
+ else:
+ print(
+ f"Warning: Missing 'mean' data for {test_name} ({size}).",
+ file=sys.stderr,
+ )
+
+ relative_report_path = cell_data[
+ "html_path_relative_to_criterion_root"
+ ]
+ joined_path = Path(html_base_path) / relative_report_path
+ full_report_url = str(joined_path).replace("\\", "/")
+
+ except Exception as e:
+ print(
+ f"Error processing cell data for {test_name} ({size}): {e}",
+ file=sys.stderr,
+ )
+
+ formatted_mean = format_nanoseconds(mean_value)
+
+ if full_report_url and full_report_url != "#":
+ table_content += f' {html.escape(formatted_mean)} | \n'
+ else:
+ table_content += f" {html.escape(formatted_mean)} | \n"
+ table_content += "</tr>\n"
+
+ table_content += """
+
+
+ """
+ return f"{html_doc_start}{table_content}{html_doc_end}"
+
+
+if __name__ == "__main__":
+ DEFAULT_CRITERION_PATH = "target/criterion"
+ DEFAULT_OUTPUT_FILE = "./target/criterion/index.html"
+ DEFAULT_HTML_BASE_PATH = ""
+
+ parser = argparse.ArgumentParser(
+ description="Load Criterion benchmark results from JSON files and generate an HTML table with links to reports."
+ )
+ parser.add_argument(
+ "--dry-run",
+ action="store_true",
+ help="Perform a dry run without writing the HTML file.",
+ )
+ parser.add_argument(
+ "--criterion-dir",
+ type=str,
+ default=DEFAULT_CRITERION_PATH,
+ help=f"Path to the main 'target/criterion' directory (default: {DEFAULT_CRITERION_PATH}) containing benchmark data.",
+ )
+ parser.add_argument(
+ "--html-base-path",
+ type=str,
+ default=DEFAULT_HTML_BASE_PATH,
+ help=(
+ f"Prefix for HTML links to individual benchmark reports. "
+ f"This is prepended to each report's relative path (e.g., 'benchmark_name/report/index.html'). "
+ f"If the main output HTML (default: '{DEFAULT_OUTPUT_FILE}') is in the 'target/criterion/' directory, "
+ f"this should typically be empty (default: '{DEFAULT_HTML_BASE_PATH}'). "
+ ),
+ )
+ parser.add_argument(
+ "--output-file",
+ type=str,
+ default=DEFAULT_OUTPUT_FILE,
+ help=f"Path to save the generated HTML summary report (default: {DEFAULT_OUTPUT_FILE}).",
+ )
+
+ args = parser.parse_args()
+
+ if args.dry_run:
+ print(
+ "Dry run mode: arguments parsed and imports loaded; no files will be written."
+ )
+ sys.exit(0)
+
+ criterion_path = Path(args.criterion_dir)
+ output_file_path = Path(args.output_file)
+
+ try:
+ output_file_path.parent.mkdir(parents=True, exist_ok=True)
+ except OSError as e:
+ print(
+ f"Error: Could not create output directory {output_file_path.parent}: {e}",
+ file=sys.stderr,
+ )
+ sys.exit(1)
+
+ all_results = load_criterion_reports(criterion_path)
+
+ # Generate HTML output regardless of whether results were found (handles "no results" page)
+ html_output = generate_html_table_with_links(all_results, args.html_base_path)
+
+ if not all_results:
+ print("\nNo benchmark results found or loaded.")
+ # Fallthrough to write the "no results" page generated by generate_html_table_with_links
+ else:
+ print("\nSuccessfully loaded benchmark results.")
+ # pprint(all_results) # Uncomment for debugging
+
+ print(
+ f"Generating HTML report with links using HTML base path: '{args.html_base_path}'"
+ )
+
+ try:
+ with output_file_path.open("w", encoding="utf-8") as f:
+ f.write(html_output)
+ print(f"\nSuccessfully wrote HTML report to {output_file_path}")
+ if not all_results:
+ sys.exit(1) # Exit with error code if no results, though file is created
+ sys.exit(0)
+ except IOError as e:
+ print(f"Error writing HTML output to {output_file_path}: {e}", file=sys.stderr)
+ sys.exit(1)
+ except Exception as e:
+ print(f"An unexpected error occurred while writing HTML: {e}", file=sys.stderr)
+ sys.exit(1)
diff --git a/.github/workflows/docs-and-testcov.yml b/.github/workflows/docs-and-testcov.yml
index 7408e46..c946b9b 100644
--- a/.github/workflows/docs-and-testcov.yml
+++ b/.github/workflows/docs-and-testcov.yml
@@ -10,6 +10,10 @@ on:
# pull_request:
# branches: [main]
workflow_dispatch:
+ workflow_run:
+ workflows: ["run-benchmarks"]
+ types:
+ - completed
permissions:
contents: read
@@ -44,6 +48,14 @@ jobs:
toolchain: stable
override: true
+ - name: Replace logo URL in README.md
+ env:
+ LOGO_URL: ${{ secrets.LOGO_URL }}
+ run: |
+ # Rewrite the logo path so the README references the copy published next to the generated docs.
+ # (LOGO_URL is exported to the environment but is not used by this sed command.)
+
+ sed -i 's|.github/rustframe_logo.png|rustframe_logo.png|g' README.md
+
- name: Build documentation
run: cargo doc --no-deps --release
@@ -92,20 +104,59 @@ jobs:
<(echo '{}') \
> last-commit-date.json
+ - name: Download last available benchmark report
+ env:
+ # gh api needs a token; reuse the CUSTOM_GH_TOKEN already used for the curl download below.
+ GH_TOKEN: ${{ secrets.CUSTOM_GH_TOKEN }}
+ run: |
+ artifact_url=$(gh api -H "Accept: application/vnd.github+json" \
+ /repos/${{ github.repository }}/actions/artifacts \
+ | jq -r '.artifacts[] | select(.name | startswith("benchmark-reports")) | .archive_download_url' | head -n 1)
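+ # head -n 1 keeps the most recent matching artifact (the API lists the newest artifacts first).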
+
+ if [ -z "$artifact_url" ]; then
+ echo "No benchmark artifact found!"
+ exit 1
+ fi
+
+ curl -L -H "Authorization: Bearer ${{ secrets.CUSTOM_GH_TOKEN }}" \
+ "$artifact_url" -o benchmark-report.zip
+
+ # Print all files in the current directory
+ echo "Files in the current directory:"
+ ls -al
+
+ # check if the zip file is valid
+ if ! unzip -tq benchmark-report.zip; then
+ echo "benchmark-report.zip is invalid or corrupted!"
+ exit 1
+ fi
+
+ unzip -q benchmark-report.zip -d benchmark-report
+
+ # echo "" > benchmark-report/index.html
+
- name: Copy files to output directory
run: |
# mkdir docs
mkdir -p target/doc/docs
- cp -r target/doc/rustframe/* target/doc/docs/
+ mv target/doc/rustframe/* target/doc/docs/
mkdir output
cp tarpaulin-report.html target/doc/docs/
cp tarpaulin-report.json target/doc/docs/
cp tarpaulin-badge.json target/doc/docs/
cp last-commit-date.json target/doc/docs/
- mkdir -p target/doc/docs/.github
- cp .github/rustframe_logo.png target/doc/docs/.github/
- echo "" > target/doc/index.html
+ # cp -r .github target/doc/docs
+ cp .github/rustframe_logo.png target/doc/docs/
+ # echo "" > target/doc/index.html
+ touch target/doc/.nojekyll
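+ # .nojekyll stops GitHub Pages from running Jekyll, so rustdoc files and directories starting with "_" are served as-is.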
+
+ # copy the benchmark report to the output directory
+ cp -r benchmark-report target/doc/
+
+ - name: Add index.html to output directory
+ run: |
+ cp .github/htmldocs/index.html target/doc/index.html
+ cp .github/rustframe_logo.png target/doc/rustframe_logo.png
- name: Upload Pages artifact
# if: github.event_name == 'push' || github.event_name == 'workflow_dispatch'
@@ -115,4 +166,4 @@ jobs:
- name: Deploy to GitHub Pages
# if: github.event_name == 'push' || github.event_name == 'workflow_dispatch'
- uses: actions/deploy-pages@v4
\ No newline at end of file
+ uses: actions/deploy-pages@v4
diff --git a/.github/workflows/run-benchmarks.yml b/.github/workflows/run-benchmarks.yml
index a7c3044..3742725 100644
--- a/.github/workflows/run-benchmarks.yml
+++ b/.github/workflows/run-benchmarks.yml
@@ -1,10 +1,10 @@
-name: Run benchmarks
+name: run-benchmarks
on:
workflow_dispatch:
- # push:
- # branches:
- # - main
+ push:
+ branches:
+ - main
jobs:
pick-runner:
@@ -33,8 +33,28 @@ jobs:
with:
toolchain: stable
+ - name: Install Python
+ uses: actions/setup-python@v4
+ - name: Install uv
+ uses: astral-sh/setup-uv@v5
+ - name: Setup venv
+ run: |
+ uv venv
+ uv pip install pandas
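+ # The dry run just checks that the report script and its pandas dependency load before the long benchmark run.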
+ uv run .github/scripts/custom_benchmark_report.py --dry-run
+
- name: Run benchmarks
- run: cargo bench
+ run: cargo bench --features bench
+
+ - name: Generate custom benchmark reports
+ run: |
+ if [ -d ./target/criterion ]; then
+ echo "Found benchmark reports, generating custom report..."
+ else
+ echo "No benchmark reports found; aborting custom report generation."
+ exit 1
+ fi
+ uv run .github/scripts/custom_benchmark_report.py
- name: Upload benchmark reports
uses: actions/upload-artifact@v4
diff --git a/.gitignore b/.gitignore
index 665001e..ac6b27b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,4 +14,6 @@ data/
.venv/
.vscode/
-tarpaulin-report.*
\ No newline at end of file
+tarpaulin-report.*
+
+.github/htmldocs/rustframe_logo.png
\ No newline at end of file
diff --git a/benches/benchmarks.rs b/benches/benchmarks.rs
index cce0a6b..bb74a64 100644
--- a/benches/benchmarks.rs
+++ b/benches/benchmarks.rs
@@ -1,35 +1,22 @@
-// Combined benchmarks for rustframe
+// Combined benchmarks
use chrono::NaiveDate;
use criterion::{criterion_group, criterion_main, Criterion};
-// Import Duration for measurement_time and warm_up_time
+
use rustframe::{
frame::{Frame, RowIndex},
- matrix::{BoolMatrix, Matrix},
+ matrix::{BoolMatrix, Matrix, SeriesOps},
utils::{BDateFreq, BDatesList},
};
use std::time::Duration;
-// You can define a custom Criterion configuration function
-// This will be passed to the criterion_group! macro
-pub fn for_short_runs() -> Criterion {
- Criterion::default()
- // (samples != total iterations)
- // limits the number of statistical data points.
- .sample_size(50)
- // measurement time per sample
- .measurement_time(Duration::from_millis(2000))
- // reduce warm-up time as well for faster overall run
- .warm_up_time(Duration::from_millis(50))
- // You could also make it much shorter if needed, e.g., 50ms measurement, 100ms warm-up
- // .measurement_time(Duration::from_millis(50))
- // .warm_up_time(Duration::from_millis(100))
-}
+// Define size categories
+const SIZES_SMALL: [usize; 1] = [1];
+const SIZES_MEDIUM: [usize; 3] = [100, 250, 500];
+const SIZES_LARGE: [usize; 1] = [1000];
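+// Each size category runs under its own Criterion configuration (see the config_* functions below).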
-fn bool_matrix_operations_benchmark(c: &mut Criterion) {
- let sizes = [1, 100, 1000];
- // let sizes = [1000];
-
- for &size in &sizes {
+// Modified benchmark functions to accept a slice of sizes
+fn bool_matrix_operations_benchmark(c: &mut Criterion, sizes: &[usize]) {
+ for &size in sizes {
let data1: Vec<bool> = (0..size * size).map(|x| x % 2 == 0).collect();
let data2: Vec<bool> = (0..size * size).map(|x| x % 3 == 0).collect();
let bm1 = BoolMatrix::from_vec(data1.clone(), size, size);
@@ -61,11 +48,8 @@ fn bool_matrix_operations_benchmark(c: &mut Criterion) {
}
}
-fn matrix_boolean_operations_benchmark(c: &mut Criterion) {
- let sizes = [1, 100, 1000];
- // let sizes = [1000];
-
- for &size in &sizes {
+fn matrix_boolean_operations_benchmark(c: &mut Criterion, sizes: &[usize]) {
+ for &size in sizes {
let data1: Vec<bool> = (0..size * size).map(|x| x % 2 == 0).collect();
let data2: Vec<bool> = (0..size * size).map(|x| x % 3 == 0).collect();
let bm1 = BoolMatrix::from_vec(data1.clone(), size, size);
@@ -97,11 +81,8 @@ fn matrix_boolean_operations_benchmark(c: &mut Criterion) {
}
}
-fn matrix_operations_benchmark(c: &mut Criterion) {
- let sizes = [1, 100, 1000];
- // let sizes = [1000];
-
- for &size in &sizes {
+fn matrix_operations_benchmark(c: &mut Criterion, sizes: &[usize]) {
+ for &size in sizes {
let data: Vec<f64> = (0..size * size).map(|x| x as f64).collect();
let ma = Matrix::from_vec(data.clone(), size, size);
@@ -130,8 +111,7 @@ fn matrix_operations_benchmark(c: &mut Criterion) {
});
}
- // Benchmarking matrix addition
- for &size in &sizes {
+ for &size in sizes {
let data1: Vec<f64> = (0..size * size).map(|x| x as f64).collect();
let data2: Vec<f64> = (0..size * size).map(|x| (x + 1) as f64).collect();
let ma = Matrix::from_vec(data1.clone(), size, size);
@@ -163,44 +143,136 @@ fn matrix_operations_benchmark(c: &mut Criterion) {
}
}
-fn benchmark_frame_operations(c: &mut Criterion) {
- let n_periods = 1000;
- let n_cols = 1000;
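+// Builds a square size x size Frame<f64> with a business-date row index and col_1..col_N column names.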
+fn generate_frame(size: usize) -> Frame<f64> {
+ let data: Vec<f64> = (0..size * size).map(|x| x as f64).collect();
let dates: Vec<NaiveDate> =
- BDatesList::from_n_periods("2024-01-02".to_string(), BDateFreq::Daily, n_periods)
+ BDatesList::from_n_periods("2000-01-01".to_string(), BDateFreq::Daily, size)
.unwrap()
.list()
.unwrap();
-
- // let col_names= str(i) for i in range(1, 1000)
- let col_names: Vec<String> = (1..=n_cols).map(|i| format!("col_{}", i)).collect();
-
- let data1: Vec<f64> = (0..n_periods * n_cols).map(|x| x as f64).collect();
- let data2: Vec<f64> = (0..n_periods * n_cols).map(|x| (x + 1) as f64).collect();
- let ma = Matrix::from_vec(data1.clone(), n_periods, n_cols);
- let mb = Matrix::from_vec(data2.clone(), n_periods, n_cols);
-
- let fa = Frame::new(
- ma.clone(),
- col_names.clone(),
- Some(RowIndex::Date(dates.clone())),
- );
- let fb = Frame::new(mb, col_names, Some(RowIndex::Date(dates)));
-
- c.bench_function("frame element-wise multiply (1000x1000)", |b| {
- b.iter(|| {
- let _result = &fa * &fb;
- });
- });
+ let col_names: Vec<String> = (1..=size).map(|i| format!("col_{}", i)).collect();
+ Frame::new(
+ Matrix::from_vec(data.clone(), size, size),
+ col_names,
+ Some(RowIndex::Date(dates)),
+ )
}
-// Define the criterion group and pass the custom configuration function
+fn benchmark_frame_operations(c: &mut Criterion, sizes: &[usize]) {
+ for &size in sizes {
+ let fa = generate_frame(size);
+ let fb = generate_frame(size);
+
+ c.bench_function(&format!("frame add ({}x{})", size, size), |b| {
+ b.iter(|| {
+ let _result = &fa + &fb;
+ });
+ });
+
+ c.bench_function(&format!("frame subtract ({}x{})", size, size), |b| {
+ b.iter(|| {
+ let _result = &fa - &fb;
+ });
+ });
+
+ c.bench_function(&format!("frame multiply ({}x{})", size, size), |b| {
+ b.iter(|| {
+ let _result = &fa * &fb;
+ });
+ });
+
+ c.bench_function(&format!("frame divide ({}x{})", size, size), |b| {
+ b.iter(|| {
+ let _result = &fa / &fb;
+ });
+ });
+
+ c.bench_function(&format!("frame sum_horizontal ({}x{})", size, size), |b| {
+ b.iter(|| {
+ let _result = fa.sum_horizontal();
+ });
+ });
+ c.bench_function(&format!("frame sum_vertical ({}x{})", size, size), |b| {
+ b.iter(|| {
+ let _result = fa.sum_vertical();
+ });
+ });
+ c.bench_function(&format!("frame prod_horizontal ({}x{})", size, size), |b| {
+ b.iter(|| {
+ let _result = fa.prod_horizontal();
+ });
+ });
+ c.bench_function(&format!("frame prod_vertical ({}x{})", size, size), |b| {
+ b.iter(|| {
+ let _result = fa.prod_vertical();
+ });
+ });
+ }
+}
+
+// Runner functions for each size category
+fn run_benchmarks_small(c: &mut Criterion) {
+ bool_matrix_operations_benchmark(c, &SIZES_SMALL);
+ matrix_boolean_operations_benchmark(c, &SIZES_SMALL);
+ matrix_operations_benchmark(c, &SIZES_SMALL);
+ benchmark_frame_operations(c, &SIZES_SMALL);
+}
+
+fn run_benchmarks_medium(c: &mut Criterion) {
+ bool_matrix_operations_benchmark(c, &SIZES_MEDIUM);
+ matrix_boolean_operations_benchmark(c, &SIZES_MEDIUM);
+ matrix_operations_benchmark(c, &SIZES_MEDIUM);
+ benchmark_frame_operations(c, &SIZES_MEDIUM);
+}
+
+fn run_benchmarks_large(c: &mut Criterion) {
+ bool_matrix_operations_benchmark(c, &SIZES_LARGE);
+ matrix_boolean_operations_benchmark(c, &SIZES_LARGE);
+ matrix_operations_benchmark(c, &SIZES_LARGE);
+ benchmark_frame_operations(c, &SIZES_LARGE);
+}
+
+// Configuration functions for different size categories
+fn config_small_arrays() -> Criterion {
+ Criterion::default()
+ .sample_size(500)
+ .measurement_time(Duration::from_millis(100))
+ .warm_up_time(Duration::from_millis(5))
+}
+
+fn config_medium_arrays() -> Criterion {
+ Criterion::default()
+ .sample_size(100)
+ .measurement_time(Duration::from_millis(2000))
+ .warm_up_time(Duration::from_millis(100))
+}
+
+fn config_large_arrays() -> Criterion {
+ Criterion::default()
+ .sample_size(50)
+ .measurement_time(Duration::from_millis(5000))
+ .warm_up_time(Duration::from_millis(200))
+}
+
+
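+// One Criterion group per size category, each with its own sampling configuration.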
criterion_group!(
- name = combined_benches;
- config = for_short_runs(); // Use the custom configuration here
- targets = bool_matrix_operations_benchmark,
- matrix_boolean_operations_benchmark,
- matrix_operations_benchmark,
- benchmark_frame_operations
+ name = benches_small_arrays;
+ config = config_small_arrays();
+ targets = run_benchmarks_small
+);
+criterion_group!(
+ name = benches_medium_arrays;
+ config = config_medium_arrays();
+ targets = run_benchmarks_medium
+);
+criterion_group!(
+ name = benches_large_arrays;
+ config = config_large_arrays();
+ targets = run_benchmarks_large
+);
+
+criterion_main!(
+ benches_small_arrays,
+ benches_medium_arrays,
+ benches_large_arrays
);
-criterion_main!(combined_benches);
diff --git a/src/matrix/mat.rs b/src/matrix/mat.rs
index 180650a..18eb804 100644
--- a/src/matrix/mat.rs
+++ b/src/matrix/mat.rs
@@ -2,7 +2,7 @@
use std::ops::{Add, BitAnd, BitOr, BitXor, Div, Index, IndexMut, Mul, Not, Sub};
-/// A column‑major 2D matrix of `T`
+/// A column‑major 2D matrix of `T`, indexed by `(row, column)`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Matrix<T> {
rows: usize,