Compare commits


34 Commits

Author SHA1 Message Date
Palash Tyagi
485d430ad1 Merge 77983eef777e9461723bb09838b20705716bbae5 into e9eeac0c40e5f12117d7102462ad64778c3f8db0 2025-05-11 02:00:36 +01:00
77983eef77 Merge branch 'main' into date_utils 2025-05-11 02:00:34 +01:00
e9eeac0c40 Merge pull request #43 from Magnus167/docs_page (Enhance docs site) 2025-05-11 01:59:58 +01:00
Palash Tyagi
85482d9569 Restrict push trigger to only the main branch in workflow configuration 2025-05-11 01:58:04 +01:00
Palash Tyagi
c05f1696f0 Remove automatic redirect in benchmark report index.html generation 2025-05-11 01:55:05 +01:00
Palash Tyagi
e3f4749709 Add dry run option to custom benchmark report script and update workflow to use it 2025-05-11 01:40:25 +01:00
Palash Tyagi
643c897479 Install pandas before generating custom benchmark reports 2025-05-11 01:25:27 +01:00
Palash Tyagi
876f1ccbf3 Fix Python command to use python3 for custom benchmark report generation 2025-05-11 01:06:27 +01:00
Palash Tyagi
9702b6d5c4 Add custom benchmark report generation step to workflow 2025-05-11 00:55:29 +01:00
Palash Tyagi
6a9b828ada Sort benchmark sizes numerically in HTML report generation 2025-05-11 00:51:35 +01:00
Palash Tyagi
1a5b8919d3 Adjust benchmark configuration for small arrays to optimize measurement and warm-up times 2025-05-11 00:51:24 +01:00
Palash Tyagi
2e980a78fa Update documentation for Matrix struct to clarify indexing method 2025-05-11 00:46:46 +01:00
Palash Tyagi
dfe259a371 move benchmark config 2025-05-07 20:20:42 +01:00
Palash Tyagi
4e74c2dcfe Refactor benchmark configurations to improve size categorization and sampling settings 2025-05-07 00:55:09 +01:00
Palash Tyagi
498f822672 Refactor custom benchmark report script to improve HTML generation and structure 2025-05-07 00:08:39 +01:00
Palash Tyagi
34809656f6 Enhance frame benchmarks by adding arithmetic operations and refactoring frame generation 2025-05-07 00:08:28 +01:00
Palash Tyagi
894b85b384 Refactor benchmarks to use a constant for sizes 2025-05-06 18:06:36 +01:00
Palash Tyagi
b758b22b93 Add custom benchmark report generation script with HTML output 2025-05-06 00:14:09 +01:00
Palash Tyagi
bb0bffba73 Refactor documentation output steps by removing logo verification and updating index.html copy process 2025-05-05 23:27:27 +01:00
Palash Tyagi
bca1121004 Fix icon path in index.html and update .gitignore to include extra copies of rustframe_logo.png 2025-05-05 23:27:18 +01:00
Palash Tyagi
b3b0e5e3ae Update index.html to enhance documentation links and add benchmarks section 2025-05-05 23:21:33 +01:00
Palash Tyagi
eeabfcfff6 Simplify benchmark report extraction process by using quiet unzip and removing unnecessary steps 2025-05-05 22:37:25 +01:00
Palash Tyagi
eab1c5eec1 Download benchmark report zip file in CI workflow 2025-05-05 22:36:00 +01:00
Palash Tyagi
efe44c7399 Update benchmark report download step to use custom GitHub token for authorization 2025-05-05 22:34:41 +01:00
Palash Tyagi
33fea1d126 Refactor benchmark report download step to improve error handling and add directory listing 2025-05-05 22:33:25 +01:00
Palash Tyagi
f0de677b69 Add validation for benchmark report zip file and list directory contents 2025-05-05 22:31:31 +01:00
Palash Tyagi
bbcdbb4151 Add error handling for missing benchmark report zip file in CI workflow 2025-05-05 22:28:14 +01:00
Palash Tyagi
aec6278a50 Add debugging output for benchmark report extraction in CI workflow 2025-05-05 22:22:50 +01:00
Palash Tyagi
054b3c828e Add 'unzip' to the list of installed packages in Dockerfile 2025-05-05 22:00:23 +01:00
Palash Tyagi
659e93c27d testing runners 2025-05-05 21:57:43 +01:00
Palash Tyagi
f520f29f11 Add installation of GitHub CLI in Dockerfile 2025-05-05 21:55:56 +01:00
Palash Tyagi
db8b756a74 Update push trigger to include 'docs_page' branch in CI workflow 2025-05-05 21:33:35 +01:00
Palash Tyagi
d8154a1175 Enhance CI workflow to download and include benchmark reports in documentation output 2025-05-05 21:32:51 +01:00
Palash Tyagi
4d4b6d1656 Add initial HTML landing page 2025-05-05 21:05:48 +01:00
8 changed files with 713 additions and 79 deletions

.github/htmldocs/index.html vendored Normal file
View File

@@ -0,0 +1,74 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Rustframe</title>
<link rel="icon" type="image/png" href="./rustframe_logo.png">
<style>
body {
font-family: Arial, sans-serif;
background-color: #2b2b2b;
color: #d4d4d4;
margin: 0;
padding: 0;
display: flex;
justify-content: center;
align-items: center;
height: 100vh;
}
main {
text-align: center;
padding: 20px;
background-color: #3c3c3c;
border-radius: 10px;
box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2);
max-width: 600px;
}
img {
max-width: 100px;
margin-bottom: 20px;
}
h1 {
/* logo is b35f20 */
color: #f8813f;
}
a {
color: #ff9a60;
text-decoration: none;
}
a:hover {
text-decoration: underline;
}
</style>
</head>
<body>
<main>
<h1>
<img src="./rustframe_logo.png" alt="Rustframe Logo"><br>
Rustframe
</h1>
<h2>A lightweight dataframe & math toolkit for Rust</h2>
<hr style="border: 1px solid #d4d4d4; margin: 20px 0;">
<p>
📚 <a href="https://magnus167.github.io/rustframe/docs">Docs</a> |
📊 <a href="https://magnus167.github.io/rustframe/benchmark-report/">Benchmarks</a>
<br><br>
🦀 <a href="https://crates.io/crates/rustframe">Crates.io</a> |
🔖 <a href="https://docs.rs/rustframe/latest/rustframe/">docs.rs</a>
<br><br>
🐙 <a href="https://github.com/Magnus167/rustframe">GitHub</a> |
🌐 <a href="https://gitea.nulltech.uk/Magnus167/rustframe">Gitea mirror</a>
</p>
</main>
</body>
</html>

View File

@@ -7,7 +7,7 @@ ARG DEBIAN_FRONTEND=noninteractive
RUN apt update -y && apt upgrade -y && useradd -m docker
RUN apt install -y --no-install-recommends \
curl jq git \
curl jq git unzip \
# dev dependencies
build-essential libssl-dev libffi-dev python3 python3-venv python3-dev python3-pip \
# dot net core dependencies
@@ -15,6 +15,12 @@ RUN apt install -y --no-install-recommends \
# Rust and Cargo dependencies
gcc cmake
# Install GitHub CLI
RUN curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg | dd of=/usr/share/keyrings/githubcli-archive-keyring.gpg \
&& chmod go+r /usr/share/keyrings/githubcli-archive-keyring.gpg \
&& echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | tee /etc/apt/sources.list.d/github-cli.list > /dev/null \
&& apt update -y && apt install -y gh \
&& rm -rf /var/lib/apt/lists/*
# Install Rust and Cargo
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

View File

@@ -0,0 +1,426 @@
# create_benchmark_table.py
import argparse
import json
import re
import sys
from pathlib import Path
from pprint import pprint
from collections import defaultdict
from typing import Dict, Any, Optional
import pandas as pd
import html # Import the html module for escaping
# Regular expression to parse "test_name (size)" format
DIR_PATTERN = re.compile(r"^(.*?) \((.*?)\)$")
# Standard location for criterion estimates relative to the benchmark dir
ESTIMATES_PATH_NEW = Path("new") / "estimates.json"
# Fallback location (older versions or baseline comparisons)
ESTIMATES_PATH_BASE = Path("base") / "estimates.json"
# Standard location for the HTML report relative to the benchmark's specific directory
REPORT_HTML_RELATIVE_PATH = Path("report") / "index.html"
def get_default_criterion_report_path() -> Path:
"""
Returns the default path for the Criterion benchmark report.
This is typically 'target/criterion/report/index.html'.
"""
return Path("target") / "criterion" / "report" / "index.html"
def load_criterion_reports(
criterion_root_dir: Path,
) -> Dict[str, Dict[str, Dict[str, Any]]]:
"""
Loads Criterion benchmark results from a specified directory and finds HTML paths.
Args:
criterion_root_dir: The Path object pointing to the main
'target/criterion' directory.
Returns:
A nested dictionary structured as:
{ test_name: { size: {'json': json_content, 'html_path': relative_html_path}, ... }, ... }
Returns an empty dict if the root directory is not found or empty.
"""
results: Dict[str, Dict[str, Dict[str, Any]]] = defaultdict(dict)
if not criterion_root_dir.is_dir():
print(
f"Error: Criterion root directory not found or is not a directory: {criterion_root_dir}",
file=sys.stderr,
)
return {}
print(f"Scanning for benchmark reports in: {criterion_root_dir}")
for item in criterion_root_dir.iterdir():
if not item.is_dir():
continue
match = DIR_PATTERN.match(item.name)
if not match:
continue
test_name = match.group(1).strip()
size = match.group(2).strip()
benchmark_dir_name = item.name
benchmark_dir_path = item
json_path: Optional[Path] = None
if (benchmark_dir_path / ESTIMATES_PATH_NEW).is_file():
json_path = benchmark_dir_path / ESTIMATES_PATH_NEW
elif (benchmark_dir_path / ESTIMATES_PATH_BASE).is_file():
json_path = benchmark_dir_path / ESTIMATES_PATH_BASE
html_path = benchmark_dir_path / REPORT_HTML_RELATIVE_PATH
if json_path is None or not json_path.is_file():
print(
f"Warning: Could not find estimates JSON in {benchmark_dir_path}. Skipping benchmark size '{test_name} ({size})'.",
file=sys.stderr,
)
continue
if not html_path.is_file():
print(
f"Warning: Could not find HTML report at expected location {html_path}. Skipping benchmark size '{test_name} ({size})'.",
file=sys.stderr,
)
continue
try:
with json_path.open("r", encoding="utf-8") as f:
json_data = json.load(f)
results[test_name][size] = {
"json": json_data,
"html_path_relative_to_criterion_root": str(
Path(benchmark_dir_name) / REPORT_HTML_RELATIVE_PATH
).replace("\\", "/"),
}
except json.JSONDecodeError:
print(f"Error: Failed to decode JSON from {json_path}", file=sys.stderr)
except IOError as e:
print(f"Error: Failed to read file {json_path}: {e}", file=sys.stderr)
except Exception as e:
print(
f"Error: An unexpected error occurred loading {json_path}: {e}",
file=sys.stderr,
)
return dict(results)
def format_nanoseconds(ns: float) -> str:
"""Formats nanoseconds into a human-readable string with units."""
if pd.isna(ns):
return "-"
if ns < 1_000:
return f"{ns:.2f} ns"
elif ns < 1_000_000:
return f"{ns / 1_000:.2f} µs"
elif ns < 1_000_000_000:
return f"{ns / 1_000_000:.2f} ms"
else:
return f"{ns / 1_000_000_000:.2f} s"
def generate_html_table_with_links(
results: Dict[str, Dict[str, Dict[str, Any]]], html_base_path: str
) -> str:
"""
Generates a full HTML page with a styled table from benchmark results.
"""
css_styles = """
<style>
body {
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol";
line-height: 1.6;
margin: 0;
padding: 20px;
background-color: #f4f7f6;
color: #333;
}
.container {
max-width: 1200px;
margin: 20px auto;
padding: 20px;
background-color: #fff;
box-shadow: 0 0 15px rgba(0,0,0,0.1);
border-radius: 8px;
}
h1 {
color: #2c3e50;
text-align: center;
margin-bottom: 10px;
}
p.subtitle {
text-align: center;
margin-bottom: 8px;
color: #555;
font-size: 0.95em;
}
p.note {
text-align: center;
margin-bottom: 25px;
color: #777;
font-size: 0.85em;
}
.benchmark-table {
width: 100%;
border-collapse: collapse;
margin-top: 25px;
box-shadow: 0 2px 8px rgba(0,0,0,0.05);
}
.benchmark-table th, .benchmark-table td {
border: 1px solid #dfe6e9; /* Lighter border */
padding: 12px 15px;
}
.benchmark-table th {
background-color: #3498db; /* Primary blue */
color: #ffffff;
font-weight: 600; /* Slightly bolder */
text-transform: uppercase;
letter-spacing: 0.05em;
text-align: center; /* Center align headers */
}
.benchmark-table td {
text-align: right; /* Default for data cells (times) */
}
.benchmark-table td:first-child { /* Benchmark Name column */
font-weight: 500;
color: #2d3436;
text-align: left; /* Left align benchmark names */
}
.benchmark-table tbody tr:nth-child(even) {
background-color: #f8f9fa; /* Very light grey for even rows */
}
.benchmark-table tbody tr:hover {
background-color: #e9ecef; /* Slightly darker on hover */
}
.benchmark-table a {
color: #2980b9; /* Link blue */
text-decoration: none;
font-weight: 500;
}
.benchmark-table a:hover {
text-decoration: underline;
color: #1c5a81; /* Darker blue on hover */
}
.no-results {
text-align: center;
font-size: 1.2em;
color: #7f8c8d;
margin-top: 30px;
}
</style>
"""
html_doc_start = f"""<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Criterion Benchmark Results</title>
{css_styles}
</head>
<body>
<div class="container">
<h1 id="criterion-benchmark-results">Criterion Benchmark Results</h1>
"""
html_doc_end = """
</div>
</body>
</html>"""
if not results:
return f"""{html_doc_start}
<p class="no-results">No benchmark results found or loaded.</p>
{html_doc_end}"""
all_sizes = sorted(
list(set(size for test_data in results.values() for size in test_data.keys())),
key=(lambda x: int(x.split("x")[0])),
)
all_test_names = sorted(list(results.keys()))
table_content = """
<p class="subtitle">Each cell links to the detailed Criterion.rs report for that specific benchmark size.</p>
<p class="note">Note: Values shown are the midpoint of the mean confidence interval, formatted for readability.</p>
<p class="note"><a href="report/index.html">[Switch to the standard Criterion.rs report]</a></p>
<table class="benchmark-table">
<thead>
<tr>
<th>Benchmark Name</th>
"""
for size in all_sizes:
table_content += f"<th>{html.escape(size)}</th>\n"
table_content += """
</tr>
</thead>
<tbody>
"""
for test_name in all_test_names:
table_content += f"<tr>\n"
table_content += f" <td>{html.escape(test_name)}</td>\n"
for size in all_sizes:
cell_data = results.get(test_name, {}).get(size)
mean_value = pd.NA
full_report_url = "#"
if (
cell_data
and "json" in cell_data
and "html_path_relative_to_criterion_root" in cell_data
):
try:
mean_data = cell_data["json"].get("mean")
if mean_data and "confidence_interval" in mean_data:
ci = mean_data["confidence_interval"]
if "lower_bound" in ci and "upper_bound" in ci:
lower, upper = ci["lower_bound"], ci["upper_bound"]
if isinstance(lower, (int, float)) and isinstance(
upper, (int, float)
):
mean_value = (lower + upper) / 2.0
else:
print(
f"Warning: Non-numeric bounds for {test_name} ({size}).",
file=sys.stderr,
)
else:
print(
f"Warning: Missing confidence_interval bounds for {test_name} ({size}).",
file=sys.stderr,
)
else:
print(
f"Warning: Missing 'mean' data for {test_name} ({size}).",
file=sys.stderr,
)
relative_report_path = cell_data[
"html_path_relative_to_criterion_root"
]
joined_path = Path(html_base_path) / relative_report_path
full_report_url = str(joined_path).replace("\\", "/")
except Exception as e:
print(
f"Error processing cell data for {test_name} ({size}): {e}",
file=sys.stderr,
)
formatted_mean = format_nanoseconds(mean_value)
if full_report_url and full_report_url != "#":
table_content += f' <td><a href="{html.escape(full_report_url)}">{html.escape(formatted_mean)}</a></td>\n'
else:
table_content += f" <td>{html.escape(formatted_mean)}</td>\n"
table_content += "</tr>\n"
table_content += """
</tbody>
</table>
"""
return f"{html_doc_start}{table_content}{html_doc_end}"
if __name__ == "__main__":
DEFAULT_CRITERION_PATH = "target/criterion"
DEFAULT_OUTPUT_FILE = "./target/criterion/index.html"
DEFAULT_HTML_BASE_PATH = ""
parser = argparse.ArgumentParser(
description="Load Criterion benchmark results from JSON files and generate an HTML table with links to reports."
)
parser.add_argument(
"--dry-run",
action="store_true",
help="Perform a dry run without writing the HTML file.",
)
parser.add_argument(
"--criterion-dir",
type=str,
default=DEFAULT_CRITERION_PATH,
help=f"Path to the main 'target/criterion' directory (default: {DEFAULT_CRITERION_PATH}) containing benchmark data.",
)
parser.add_argument(
"--html-base-path",
type=str,
default=DEFAULT_HTML_BASE_PATH,
help=(
f"Prefix for HTML links to individual benchmark reports. "
f"This is prepended to each report's relative path (e.g., 'benchmark_name/report/index.html'). "
f"If the main output HTML (default: '{DEFAULT_OUTPUT_FILE}') is in the 'target/criterion/' directory, "
f"this should typically be empty (default: '{DEFAULT_HTML_BASE_PATH}'). "
),
)
parser.add_argument(
"--output-file",
type=str,
default=DEFAULT_OUTPUT_FILE,
help=f"Path to save the generated HTML summary report (default: {DEFAULT_OUTPUT_FILE}).",
)
args = parser.parse_args()
if args.dry_run:
print(
"Dry run mode: No files will be written. Use --dry-run to skip writing the HTML file."
)
sys.exit(0)
criterion_path = Path(args.criterion_dir)
output_file_path = Path(args.output_file)
try:
output_file_path.parent.mkdir(parents=True, exist_ok=True)
except OSError as e:
print(
f"Error: Could not create output directory {output_file_path.parent}: {e}",
file=sys.stderr,
)
sys.exit(1)
all_results = load_criterion_reports(criterion_path)
# Generate HTML output regardless of whether results were found (handles "no results" page)
html_output = generate_html_table_with_links(all_results, args.html_base_path)
if not all_results:
print("\nNo benchmark results found or loaded.")
# Fallthrough to write the "no results" page generated by generate_html_table_with_links
else:
print("\nSuccessfully loaded benchmark results.")
# pprint(all_results) # Uncomment for debugging
print(
f"Generating HTML report with links using HTML base path: '{args.html_base_path}'"
)
try:
with output_file_path.open("w", encoding="utf-8") as f:
f.write(html_output)
print(f"\nSuccessfully wrote HTML report to {output_file_path}")
if not all_results:
sys.exit(1) # Exit with error code if no results, though file is created
sys.exit(0)
except IOError as e:
print(f"Error writing HTML output to {output_file_path}: {e}", file=sys.stderr)
sys.exit(1)
except Exception as e:
print(f"An unexpected error occurred while writing HTML: {e}", file=sys.stderr)
sys.exit(1)
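
As a side note on how the table cells above are produced: the script parses each "test_name (size)" directory name, reads that benchmark's estimates.json, takes the midpoint of the mean confidence interval, and formats it with format_nanoseconds(). Below is a minimal, self-contained Python sketch of that path; the benchmark name and all numbers are invented for illustration and are not taken from any real run.

# Illustrative only: mirrors the parsing, midpoint, and formatting logic of the script above.
import re

DIR_PATTERN = re.compile(r"^(.*?) \((.*?)\)$")
name, size = DIR_PATTERN.match("matrix add (100x100)").groups()  # hypothetical directory name
assert (name, size) == ("matrix add", "100x100")
assert int(size.split("x")[0]) == 100  # sizes are sorted numerically on this leading dimension

# Fields consumed from new/estimates.json (values are invented):
estimates = {"mean": {"confidence_interval": {"lower_bound": 1_250_000.0, "upper_bound": 1_310_000.0}}}
ci = estimates["mean"]["confidence_interval"]
mean_value = (ci["lower_bound"] + ci["upper_bound"]) / 2.0  # midpoint of the mean CI, in nanoseconds

# Same thresholds as format_nanoseconds(): values in [1e6, 1e9) render as milliseconds.
print(f"{mean_value / 1_000_000:.2f} ms")  # -> 1.28 ms

Each such formatted value is then wrapped in a link to that benchmark's report/index.html, relative to the chosen --html-base-path.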

View File

@@ -10,6 +10,10 @@ on:
# pull_request:
# branches: [main]
workflow_dispatch:
workflow_run:
workflows: ["run-benchmarks"]
types:
- completed
permissions:
contents: read
@@ -100,6 +104,36 @@ jobs:
<(echo '{}') \
> last-commit-date.json
- name: Download last available benchmark report
run: |
artifact_url=$(gh api -H "Accept: application/vnd.github+json" \
/repos/${{ github.repository }}/actions/artifacts \
| jq -r '.artifacts[] | select(.name | startswith("benchmark-reports")) | .archive_download_url' | head -n 1)
if [ -z "$artifact_url" ]; then
echo "No benchmark artifact found!"
exit 1
fi
curl -L -H "Authorization: Bearer ${{ secrets.CUSTOM_GH_TOKEN }}" \
"$artifact_url" -o benchmark-report.zip
# Print all files in the current directory
echo "Files in the current directory:"
ls -al
# check if the zip file is valid
if ! unzip -tq benchmark-report.zip; then
echo "benchmark-report.zip is invalid or corrupted!"
exit 1
fi
unzip -q benchmark-report.zip -d benchmark-report
# echo "<meta http-equiv=\"refresh\" content=\"0; url=report/index.html\">" > benchmark-report/index.html
- name: Copy files to output directory
run: |
# mkdir docs
@@ -113,16 +147,16 @@ jobs:
cp last-commit-date.json target/doc/docs/
# cp -r .github target/doc/docs
cp .github/rustframe_logo.png target/doc/docs/
echo "<meta http-equiv=\"refresh\" content=\"0; url=docs\">" > target/doc/index.html
# echo "<meta http-equiv=\"refresh\" content=\"0; url=docs\">" > target/doc/index.html
touch target/doc/.nojekyll
# verify that logo exists in the output directory
- name: Verify logo directory
# copy the benchmark report to the output directory
cp -r benchmark-report target/doc/
- name: Add index.html to output directory
run: |
if [ ! -f target/doc/docs/rustframe_logo.png ]; then
echo "Logo not found in output directory!"
exit 1
fi
cp .github/htmldocs/index.html target/doc/index.html
cp .github/rustframe_logo.png target/doc/rustframe_logo.png
- name: Upload Pages artifact
# if: github.event_name == 'push' || github.event_name == 'workflow_dispatch'
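
For reference, the artifact-selection logic in the "Download last available benchmark report" step above can be expressed as a short Python sketch against the GitHub REST API. This is illustrative only: the token string is a placeholder for the CUSTOM_GH_TOKEN secret, and the requests library is assumed to be available.

# Illustrative sketch; not part of this changeset.
import requests

repo = "Magnus167/rustframe"  # stands in for ${{ github.repository }}
headers = {
    "Accept": "application/vnd.github+json",
    "Authorization": "Bearer <CUSTOM_GH_TOKEN>",  # placeholder token
}

resp = requests.get(f"https://api.github.com/repos/{repo}/actions/artifacts", headers=headers)
resp.raise_for_status()

# Equivalent of the jq filter: artifacts whose name starts with "benchmark-reports", newest first.
urls = [a["archive_download_url"]
        for a in resp.json()["artifacts"]
        if a["name"].startswith("benchmark-reports")]
if not urls:
    raise SystemExit("No benchmark artifact found!")

with open("benchmark-report.zip", "wb") as f:
    f.write(requests.get(urls[0], headers=headers).content)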

View File

@@ -1,4 +1,4 @@
name: Run benchmarks
name: run-benchmarks
on:
workflow_dispatch:
@@ -33,9 +33,29 @@ jobs:
with:
toolchain: stable
- name: Install Python
uses: actions/setup-python@v4
- name: Install uv
uses: astral-sh/setup-uv@v5
- name: Setup venv
run: |
uv venv
uv pip install pandas
uv run .github/scripts/custom_benchmark_report.py --dry-run
- name: Run benchmarks
run: cargo bench --features bench
- name: Generate custom benchmark reports
run: |
if [ -d ./target/criterion ]; then
echo "Found benchmark reports, generating custom report..."
else
echo "No benchmark reports found, skipping custom report generation."
exit 1
fi
uv run .github/scripts/custom_benchmark_report.py
- name: Upload benchmark reports
uses: actions/upload-artifact@v4
with:

.gitignore vendored
View File

@@ -14,4 +14,6 @@ data/
.venv/
.vscode/
tarpaulin-report.*
tarpaulin-report.*
.github/htmldocs/rustframe_logo.png

View File

@@ -1,35 +1,22 @@
// Combined benchmarks for rustframe
// Combined benchmarks
use chrono::NaiveDate;
use criterion::{criterion_group, criterion_main, Criterion};
// Import Duration for measurement_time and warm_up_time
use rustframe::{
frame::{Frame, RowIndex},
matrix::{BoolMatrix, Matrix},
matrix::{BoolMatrix, Matrix, SeriesOps},
utils::{BDateFreq, BDatesList},
};
use std::time::Duration;
// You can define a custom Criterion configuration function
// This will be passed to the criterion_group! macro
pub fn for_short_runs() -> Criterion {
Criterion::default()
// (samples != total iterations)
// limits the number of statistical data points.
.sample_size(50)
// measurement time per sample
.measurement_time(Duration::from_millis(2000))
// reduce warm-up time as well for faster overall run
.warm_up_time(Duration::from_millis(50))
// You could also make it much shorter if needed, e.g., 50ms measurement, 100ms warm-up
// .measurement_time(Duration::from_millis(50))
// .warm_up_time(Duration::from_millis(100))
}
// Define size categories
const SIZES_SMALL: [usize; 1] = [1];
const SIZES_MEDIUM: [usize; 3] = [100, 250, 500];
const SIZES_LARGE: [usize; 1] = [1000];
fn bool_matrix_operations_benchmark(c: &mut Criterion) {
let sizes = [1, 100, 1000];
// let sizes = [1000];
for &size in &sizes {
// Modified benchmark functions to accept a slice of sizes
fn bool_matrix_operations_benchmark(c: &mut Criterion, sizes: &[usize]) {
for &size in sizes {
let data1: Vec<bool> = (0..size * size).map(|x| x % 2 == 0).collect();
let data2: Vec<bool> = (0..size * size).map(|x| x % 3 == 0).collect();
let bm1 = BoolMatrix::from_vec(data1.clone(), size, size);
@@ -61,11 +48,8 @@ fn bool_matrix_operations_benchmark(c: &mut Criterion) {
}
}
fn matrix_boolean_operations_benchmark(c: &mut Criterion) {
let sizes = [1, 100, 1000];
// let sizes = [1000];
for &size in &sizes {
fn matrix_boolean_operations_benchmark(c: &mut Criterion, sizes: &[usize]) {
for &size in sizes {
let data1: Vec<bool> = (0..size * size).map(|x| x % 2 == 0).collect();
let data2: Vec<bool> = (0..size * size).map(|x| x % 3 == 0).collect();
let bm1 = BoolMatrix::from_vec(data1.clone(), size, size);
@@ -97,11 +81,8 @@ fn matrix_boolean_operations_benchmark(c: &mut Criterion) {
}
}
fn matrix_operations_benchmark(c: &mut Criterion) {
let sizes = [1, 100, 1000];
// let sizes = [1000];
for &size in &sizes {
fn matrix_operations_benchmark(c: &mut Criterion, sizes: &[usize]) {
for &size in sizes {
let data: Vec<f64> = (0..size * size).map(|x| x as f64).collect();
let ma = Matrix::from_vec(data.clone(), size, size);
@@ -130,8 +111,7 @@ fn matrix_operations_benchmark(c: &mut Criterion) {
});
}
// Benchmarking matrix addition
for &size in &sizes {
for &size in sizes {
let data1: Vec<f64> = (0..size * size).map(|x| x as f64).collect();
let data2: Vec<f64> = (0..size * size).map(|x| (x + 1) as f64).collect();
let ma = Matrix::from_vec(data1.clone(), size, size);
@@ -163,44 +143,136 @@ fn matrix_operations_benchmark(c: &mut Criterion) {
}
}
fn benchmark_frame_operations(c: &mut Criterion) {
let n_periods = 1000;
let n_cols = 1000;
fn generate_frame(size: usize) -> Frame<f64> {
let data: Vec<f64> = (0..size * size).map(|x| x as f64).collect();
let dates: Vec<NaiveDate> =
BDatesList::from_n_periods("2024-01-02".to_string(), BDateFreq::Daily, n_periods)
BDatesList::from_n_periods("2000-01-01".to_string(), BDateFreq::Daily, size)
.unwrap()
.list()
.unwrap();
// let col_names= str(i) for i in range(1, 1000)
let col_names: Vec<String> = (1..=n_cols).map(|i| format!("col_{}", i)).collect();
let data1: Vec<f64> = (0..n_periods * n_cols).map(|x| x as f64).collect();
let data2: Vec<f64> = (0..n_periods * n_cols).map(|x| (x + 1) as f64).collect();
let ma = Matrix::from_vec(data1.clone(), n_periods, n_cols);
let mb = Matrix::from_vec(data2.clone(), n_periods, n_cols);
let fa = Frame::new(
ma.clone(),
col_names.clone(),
Some(RowIndex::Date(dates.clone())),
);
let fb = Frame::new(mb, col_names, Some(RowIndex::Date(dates)));
c.bench_function("frame element-wise multiply (1000x1000)", |b| {
b.iter(|| {
let _result = &fa * &fb;
});
});
let col_names: Vec<String> = (1..=size).map(|i| format!("col_{}", i)).collect();
Frame::new(
Matrix::from_vec(data.clone(), size, size),
col_names,
Some(RowIndex::Date(dates)),
)
}
// Define the criterion group and pass the custom configuration function
fn benchmark_frame_operations(c: &mut Criterion, sizes: &[usize]) {
for &size in sizes {
let fa = generate_frame(size);
let fb = generate_frame(size);
c.bench_function(&format!("frame add ({}x{})", size, size), |b| {
b.iter(|| {
let _result = &fa + &fb;
});
});
c.bench_function(&format!("frame subtract ({}x{})", size, size), |b| {
b.iter(|| {
let _result = &fa - &fb;
});
});
c.bench_function(&format!("frame multiply ({}x{})", size, size), |b| {
b.iter(|| {
let _result = &fa * &fb;
});
});
c.bench_function(&format!("frame divide ({}x{})", size, size), |b| {
b.iter(|| {
let _result = &fa / &fb;
});
});
c.bench_function(&format!("frame sum_horizontal ({}x{})", size, size), |b| {
b.iter(|| {
let _result = fa.sum_horizontal();
});
});
c.bench_function(&format!("frame sum_vertical ({}x{})", size, size), |b| {
b.iter(|| {
let _result = fa.sum_vertical();
});
});
c.bench_function(&format!("frame prod_horizontal ({}x{})", size, size), |b| {
b.iter(|| {
let _result = fa.prod_horizontal();
});
});
c.bench_function(&format!("frame prod_vertical ({}x{})", size, size), |b| {
b.iter(|| {
let _result = fa.prod_vertical();
});
});
}
}
// Runner functions for each size category
fn run_benchmarks_small(c: &mut Criterion) {
bool_matrix_operations_benchmark(c, &SIZES_SMALL);
matrix_boolean_operations_benchmark(c, &SIZES_SMALL);
matrix_operations_benchmark(c, &SIZES_SMALL);
benchmark_frame_operations(c, &SIZES_SMALL);
}
fn run_benchmarks_medium(c: &mut Criterion) {
bool_matrix_operations_benchmark(c, &SIZES_MEDIUM);
matrix_boolean_operations_benchmark(c, &SIZES_MEDIUM);
matrix_operations_benchmark(c, &SIZES_MEDIUM);
benchmark_frame_operations(c, &SIZES_MEDIUM);
}
fn run_benchmarks_large(c: &mut Criterion) {
bool_matrix_operations_benchmark(c, &SIZES_LARGE);
matrix_boolean_operations_benchmark(c, &SIZES_LARGE);
matrix_operations_benchmark(c, &SIZES_LARGE);
benchmark_frame_operations(c, &SIZES_LARGE);
}
// Configuration functions for different size categories
fn config_small_arrays() -> Criterion {
Criterion::default()
.sample_size(500)
.measurement_time(Duration::from_millis(100))
.warm_up_time(Duration::from_millis(5))
}
fn config_medium_arrays() -> Criterion {
Criterion::default()
.sample_size(100)
.measurement_time(Duration::from_millis(2000))
.warm_up_time(Duration::from_millis(100))
}
fn config_large_arrays() -> Criterion {
Criterion::default()
.sample_size(50)
.measurement_time(Duration::from_millis(5000))
.warm_up_time(Duration::from_millis(200))
}
criterion_group!(
name = combined_benches;
config = for_short_runs(); // Use the custom configuration here
targets = bool_matrix_operations_benchmark,
matrix_boolean_operations_benchmark,
matrix_operations_benchmark,
benchmark_frame_operations
name = benches_small_arrays;
config = config_small_arrays();
targets = run_benchmarks_small
);
criterion_group!(
name = benches_medium_arrays;
config = config_medium_arrays();
targets = run_benchmarks_medium
);
criterion_group!(
name = benches_large_arrays;
config = config_large_arrays();
targets = run_benchmarks_large
);
criterion_main!(
benches_small_arrays,
benches_medium_arrays,
benches_large_arrays
);
criterion_main!(combined_benches);

View File

@@ -2,7 +2,7 @@
use std::ops::{Add, BitAnd, BitOr, BitXor, Div, Index, IndexMut, Mul, Not, Sub};
/// A column-major 2D matrix of `T`
/// A column-major 2D matrix of `T`. Index as `Array(row, column)`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Matrix<T> {
rows: usize,