Merge branch 'main' into date_utils

Palash Tyagi 2025-05-11 02:00:34 +01:00 committed by GitHub
commit 77983eef77
8 changed files with 732 additions and 81 deletions

.github/htmldocs/index.html (vendored, new file)

@@ -0,0 +1,74 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Rustframe</title>
<link rel="icon" type="image/png" href="./rustframe_logo.png">
<style>
body {
font-family: Arial, sans-serif;
background-color: #2b2b2b;
color: #d4d4d4;
margin: 0;
padding: 0;
display: flex;
justify-content: center;
align-items: center;
height: 100vh;
}
main {
text-align: center;
padding: 20px;
background-color: #3c3c3c;
border-radius: 10px;
box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2);
max-width: 600px;
}
img {
max-width: 100px;
margin-bottom: 20px;
}
h1 {
/* logo is b35f20 */
color: #f8813f;
}
a {
color: #ff9a60;
text-decoration: none;
}
a:hover {
text-decoration: underline;
}
</style>
</head>
<body>
<main>
<h1>
<img src="./rustframe_logo.png" alt="Rustframe Logo"><br>
Rustframe
</h1>
<h2>A lightweight dataframe & math toolkit for Rust</h2>
<hr style="border: 1px solid #d4d4d4; margin: 20px 0;">
<p>
📚 <a href="https://magnus167.github.io/rustframe/docs">Docs</a> |
📊 <a href="https://magnus167.github.io/rustframe/benchmark-report/">Benchmarks</a>
<br><br>
🦀 <a href="https://crates.io/crates/rustframe">Crates.io</a> |
🔖 <a href="https://docs.rs/rustframe/latest/rustframe/">docs.rs</a>
<br><br>
🐙 <a href="https://github.com/Magnus167/rustframe">GitHub</a> |
🌐 <a href="https://gitea.nulltech.uk/Magnus167/rustframe">Gitea mirror</a>
</p>
</main>
</body>
</html>


@@ -7,7 +7,7 @@ ARG DEBIAN_FRONTEND=noninteractive
 RUN apt update -y && apt upgrade -y && useradd -m docker
 RUN apt install -y --no-install-recommends \
-    curl jq git \
+    curl jq git unzip \
     # dev dependencies
     build-essential libssl-dev libffi-dev python3 python3-venv python3-dev python3-pip \
     # dot net core dependencies
@@ -15,6 +15,12 @@ RUN apt install -y --no-install-recommends \
     # Rust and Cargo dependencies
     gcc cmake

+# Install GitHub CLI
+RUN curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg | dd of=/usr/share/keyrings/githubcli-archive-keyring.gpg \
+    && chmod go+r /usr/share/keyrings/githubcli-archive-keyring.gpg \
+    && echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | tee /etc/apt/sources.list.d/github-cli.list > /dev/null \
+    && apt update -y && apt install -y gh \
+    && rm -rf /var/lib/apt/lists/*
+
 # Install Rust and Cargo
 RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y


@@ -0,0 +1,426 @@
# create_benchmark_table.py
import argparse
import json
import re
import sys
from pathlib import Path
from pprint import pprint
from collections import defaultdict
from typing import Dict, Any, Optional
import pandas as pd
import html # Import the html module for escaping
# Regular expression to parse "test_name (size)" format
DIR_PATTERN = re.compile(r"^(.*?) \((.*?)\)$")
# Standard location for criterion estimates relative to the benchmark dir
ESTIMATES_PATH_NEW = Path("new") / "estimates.json"
# Fallback location (older versions or baseline comparisons)
ESTIMATES_PATH_BASE = Path("base") / "estimates.json"
# Standard location for the HTML report relative to the benchmark's specific directory
REPORT_HTML_RELATIVE_PATH = Path("report") / "index.html"
def get_default_criterion_report_path() -> Path:
    """
    Returns the default path to the main Criterion HTML report.
    This is typically 'target/criterion/report/index.html'.
    """
    return Path("target") / "criterion" / "report" / "index.html"
def load_criterion_reports(
criterion_root_dir: Path,
) -> Dict[str, Dict[str, Dict[str, Any]]]:
"""
Loads Criterion benchmark results from a specified directory and finds HTML paths.
Args:
criterion_root_dir: The Path object pointing to the main
'target/criterion' directory.
Returns:
A nested dictionary structured as:
{ test_name: { size: {'json': json_content, 'html_path': relative_html_path}, ... }, ... }
Returns an empty dict if the root directory is not found or empty.
"""
results: Dict[str, Dict[str, Dict[str, Any]]] = defaultdict(dict)
if not criterion_root_dir.is_dir():
print(
f"Error: Criterion root directory not found or is not a directory: {criterion_root_dir}",
file=sys.stderr,
)
return {}
print(f"Scanning for benchmark reports in: {criterion_root_dir}")
for item in criterion_root_dir.iterdir():
if not item.is_dir():
continue
match = DIR_PATTERN.match(item.name)
if not match:
continue
test_name = match.group(1).strip()
size = match.group(2).strip()
benchmark_dir_name = item.name
benchmark_dir_path = item
json_path: Optional[Path] = None
if (benchmark_dir_path / ESTIMATES_PATH_NEW).is_file():
json_path = benchmark_dir_path / ESTIMATES_PATH_NEW
elif (benchmark_dir_path / ESTIMATES_PATH_BASE).is_file():
json_path = benchmark_dir_path / ESTIMATES_PATH_BASE
html_path = benchmark_dir_path / REPORT_HTML_RELATIVE_PATH
if json_path is None or not json_path.is_file():
print(
f"Warning: Could not find estimates JSON in {benchmark_dir_path}. Skipping benchmark size '{test_name} ({size})'.",
file=sys.stderr,
)
continue
if not html_path.is_file():
print(
f"Warning: Could not find HTML report at expected location {html_path}. Skipping benchmark size '{test_name} ({size})'.",
file=sys.stderr,
)
continue
try:
with json_path.open("r", encoding="utf-8") as f:
json_data = json.load(f)
results[test_name][size] = {
"json": json_data,
"html_path_relative_to_criterion_root": str(
Path(benchmark_dir_name) / REPORT_HTML_RELATIVE_PATH
).replace("\\", "/"),
}
except json.JSONDecodeError:
print(f"Error: Failed to decode JSON from {json_path}", file=sys.stderr)
except IOError as e:
print(f"Error: Failed to read file {json_path}: {e}", file=sys.stderr)
except Exception as e:
print(
f"Error: An unexpected error occurred loading {json_path}: {e}",
file=sys.stderr,
)
return dict(results)
def format_nanoseconds(ns: float) -> str:
"""Formats nanoseconds into a human-readable string with units."""
if pd.isna(ns):
return "-"
if ns < 1_000:
return f"{ns:.2f} ns"
elif ns < 1_000_000:
return f"{ns / 1_000:.2f} µs"
elif ns < 1_000_000_000:
return f"{ns / 1_000_000:.2f} ms"
else:
return f"{ns / 1_000_000_000:.2f} s"
def generate_html_table_with_links(
results: Dict[str, Dict[str, Dict[str, Any]]], html_base_path: str
) -> str:
"""
Generates a full HTML page with a styled table from benchmark results.
"""
css_styles = """
<style>
body {
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol";
line-height: 1.6;
margin: 0;
padding: 20px;
background-color: #f4f7f6;
color: #333;
}
.container {
max-width: 1200px;
margin: 20px auto;
padding: 20px;
background-color: #fff;
box-shadow: 0 0 15px rgba(0,0,0,0.1);
border-radius: 8px;
}
h1 {
color: #2c3e50;
text-align: center;
margin-bottom: 10px;
}
p.subtitle {
text-align: center;
margin-bottom: 8px;
color: #555;
font-size: 0.95em;
}
p.note {
text-align: center;
margin-bottom: 25px;
color: #777;
font-size: 0.85em;
}
.benchmark-table {
width: 100%;
border-collapse: collapse;
margin-top: 25px;
box-shadow: 0 2px 8px rgba(0,0,0,0.05);
}
.benchmark-table th, .benchmark-table td {
border: 1px solid #dfe6e9; /* Lighter border */
padding: 12px 15px;
}
.benchmark-table th {
background-color: #3498db; /* Primary blue */
color: #ffffff;
font-weight: 600; /* Slightly bolder */
text-transform: uppercase;
letter-spacing: 0.05em;
text-align: center; /* Center align headers */
}
.benchmark-table td {
text-align: right; /* Default for data cells (times) */
}
.benchmark-table td:first-child { /* Benchmark Name column */
font-weight: 500;
color: #2d3436;
text-align: left; /* Left align benchmark names */
}
.benchmark-table tbody tr:nth-child(even) {
background-color: #f8f9fa; /* Very light grey for even rows */
}
.benchmark-table tbody tr:hover {
background-color: #e9ecef; /* Slightly darker on hover */
}
.benchmark-table a {
color: #2980b9; /* Link blue */
text-decoration: none;
font-weight: 500;
}
.benchmark-table a:hover {
text-decoration: underline;
color: #1c5a81; /* Darker blue on hover */
}
.no-results {
text-align: center;
font-size: 1.2em;
color: #7f8c8d;
margin-top: 30px;
}
</style>
"""
html_doc_start = f"""<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Criterion Benchmark Results</title>
{css_styles}
</head>
<body>
<div class="container">
<h1 id="criterion-benchmark-results">Criterion Benchmark Results</h1>
"""
html_doc_end = """
</div>
</body>
</html>"""
if not results:
return f"""{html_doc_start}
<p class="no-results">No benchmark results found or loaded.</p>
{html_doc_end}"""
all_sizes = sorted(
list(set(size for test_data in results.values() for size in test_data.keys())),
key=(lambda x: int(x.split("x")[0])),
)
all_test_names = sorted(list(results.keys()))
table_content = """
<p class="subtitle">Each cell links to the detailed Criterion.rs report for that specific benchmark size.</p>
<p class="note">Note: Values shown are the midpoint of the mean confidence interval, formatted for readability.</p>
<p class="note"><a href="report/index.html">[Switch to the standard Criterion.rs report]</a></p>
<table class="benchmark-table">
<thead>
<tr>
<th>Benchmark Name</th>
"""
for size in all_sizes:
table_content += f"<th>{html.escape(size)}</th>\n"
table_content += """
</tr>
</thead>
<tbody>
"""
for test_name in all_test_names:
table_content += f"<tr>\n"
table_content += f" <td>{html.escape(test_name)}</td>\n"
for size in all_sizes:
cell_data = results.get(test_name, {}).get(size)
mean_value = pd.NA
full_report_url = "#"
if (
cell_data
and "json" in cell_data
and "html_path_relative_to_criterion_root" in cell_data
):
try:
mean_data = cell_data["json"].get("mean")
if mean_data and "confidence_interval" in mean_data:
ci = mean_data["confidence_interval"]
if "lower_bound" in ci and "upper_bound" in ci:
lower, upper = ci["lower_bound"], ci["upper_bound"]
if isinstance(lower, (int, float)) and isinstance(
upper, (int, float)
):
mean_value = (lower + upper) / 2.0
else:
print(
f"Warning: Non-numeric bounds for {test_name} ({size}).",
file=sys.stderr,
)
else:
print(
f"Warning: Missing confidence_interval bounds for {test_name} ({size}).",
file=sys.stderr,
)
else:
print(
f"Warning: Missing 'mean' data for {test_name} ({size}).",
file=sys.stderr,
)
relative_report_path = cell_data[
"html_path_relative_to_criterion_root"
]
joined_path = Path(html_base_path) / relative_report_path
full_report_url = str(joined_path).replace("\\", "/")
except Exception as e:
print(
f"Error processing cell data for {test_name} ({size}): {e}",
file=sys.stderr,
)
formatted_mean = format_nanoseconds(mean_value)
if full_report_url and full_report_url != "#":
table_content += f' <td><a href="{html.escape(full_report_url)}">{html.escape(formatted_mean)}</a></td>\n'
else:
table_content += f" <td>{html.escape(formatted_mean)}</td>\n"
table_content += "</tr>\n"
table_content += """
</tbody>
</table>
"""
return f"{html_doc_start}{table_content}{html_doc_end}"
if __name__ == "__main__":
DEFAULT_CRITERION_PATH = "target/criterion"
DEFAULT_OUTPUT_FILE = "./target/criterion/index.html"
DEFAULT_HTML_BASE_PATH = ""
parser = argparse.ArgumentParser(
description="Load Criterion benchmark results from JSON files and generate an HTML table with links to reports."
)
parser.add_argument(
"--dry-run",
action="store_true",
help="Perform a dry run without writing the HTML file.",
)
parser.add_argument(
"--criterion-dir",
type=str,
default=DEFAULT_CRITERION_PATH,
help=f"Path to the main 'target/criterion' directory (default: {DEFAULT_CRITERION_PATH}) containing benchmark data.",
)
parser.add_argument(
"--html-base-path",
type=str,
default=DEFAULT_HTML_BASE_PATH,
help=(
f"Prefix for HTML links to individual benchmark reports. "
f"This is prepended to each report's relative path (e.g., 'benchmark_name/report/index.html'). "
f"If the main output HTML (default: '{DEFAULT_OUTPUT_FILE}') is in the 'target/criterion/' directory, "
f"this should typically be empty (default: '{DEFAULT_HTML_BASE_PATH}'). "
),
)
parser.add_argument(
"--output-file",
type=str,
default=DEFAULT_OUTPUT_FILE,
help=f"Path to save the generated HTML summary report (default: {DEFAULT_OUTPUT_FILE}).",
)
args = parser.parse_args()
if args.dry_run:
        print("Dry run mode: no files will be written; skipping HTML generation.")
sys.exit(0)
criterion_path = Path(args.criterion_dir)
output_file_path = Path(args.output_file)
try:
output_file_path.parent.mkdir(parents=True, exist_ok=True)
except OSError as e:
print(
f"Error: Could not create output directory {output_file_path.parent}: {e}",
file=sys.stderr,
)
sys.exit(1)
all_results = load_criterion_reports(criterion_path)
# Generate HTML output regardless of whether results were found (handles "no results" page)
html_output = generate_html_table_with_links(all_results, args.html_base_path)
if not all_results:
print("\nNo benchmark results found or loaded.")
# Fallthrough to write the "no results" page generated by generate_html_table_with_links
else:
print("\nSuccessfully loaded benchmark results.")
# pprint(all_results) # Uncomment for debugging
print(
f"Generating HTML report with links using HTML base path: '{args.html_base_path}'"
)
try:
with output_file_path.open("w", encoding="utf-8") as f:
f.write(html_output)
print(f"\nSuccessfully wrote HTML report to {output_file_path}")
if not all_results:
sys.exit(1) # Exit with error code if no results, though file is created
sys.exit(0)
except IOError as e:
print(f"Error writing HTML output to {output_file_path}: {e}", file=sys.stderr)
sys.exit(1)
except Exception as e:
print(f"An unexpected error occurred while writing HTML: {e}", file=sys.stderr)
sys.exit(1)
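As a quick illustration (not part of the commit), the two key transforms the script performs reduce to the following sketch; the directory name and interval values here are made up:

# Illustrative sketch only: mirrors DIR_PATTERN and the confidence-interval
# midpoint logic above; "matrix add (100x100)" is a hypothetical directory name.
import re

DIR_PATTERN = re.compile(r"^(.*?) \((.*?)\)$")
match = DIR_PATTERN.match("matrix add (100x100)")
assert match is not None
test_name, size = match.group(1), match.group(2)  # "matrix add", "100x100"

ci = {"lower_bound": 1200.0, "upper_bound": 1300.0}  # nanoseconds
mean_value = (ci["lower_bound"] + ci["upper_bound"]) / 2.0  # 1250.0 -> "1.25 µs"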


@@ -10,6 +10,10 @@ on:
   # pull_request:
   #   branches: [main]
   workflow_dispatch:
+  workflow_run:
+    workflows: ["run-benchmarks"]
+    types:
+      - completed

 permissions:
   contents: read
@@ -44,6 +48,14 @@ jobs:
           toolchain: stable
           override: true

+      - name: Replace logo URL in README.md
+        env:
+          LOGO_URL: ${{ secrets.LOGO_URL }}
+        run: |
+          # replace with EXAMPLE.COM/LOGO
+          sed -i 's|.github/rustframe_logo.png|rustframe_logo.png|g' README.md
+
       - name: Build documentation
         run: cargo doc --no-deps --release

@@ -92,20 +104,59 @@ jobs:
             <(echo '{}') \
             > last-commit-date.json

+      - name: Download last available benchmark report
+        run: |
+          artifact_url=$(gh api -H "Accept: application/vnd.github+json" \
+            /repos/${{ github.repository }}/actions/artifacts \
+            | jq -r '.artifacts[] | select(.name | startswith("benchmark-reports")) | .archive_download_url' | head -n 1)
+          if [ -z "$artifact_url" ]; then
+            echo "No benchmark artifact found!"
+            exit 1
+          fi
+          curl -L -H "Authorization: Bearer ${{ secrets.CUSTOM_GH_TOKEN }}" \
+            "$artifact_url" -o benchmark-report.zip
+          # Print all files in the current directory
+          echo "Files in the current directory:"
+          ls -al
+          # check if the zip file is valid
+          if ! unzip -tq benchmark-report.zip; then
+            echo "benchmark-report.zip is invalid or corrupted!"
+            exit 1
+          fi
+          unzip -q benchmark-report.zip -d benchmark-report
+          # echo "<meta http-equiv=\"refresh\" content=\"0; url=report/index.html\">" > benchmark-report/index.html
+
       - name: Copy files to output directory
         run: |
           # mkdir docs
           mkdir -p target/doc/docs
-          cp -r target/doc/rustframe/* target/doc/docs/
+          mv target/doc/rustframe/* target/doc/docs/
           mkdir output
           cp tarpaulin-report.html target/doc/docs/
           cp tarpaulin-report.json target/doc/docs/
           cp tarpaulin-badge.json target/doc/docs/
           cp last-commit-date.json target/doc/docs/
-          mkdir -p target/doc/docs/.github
-          cp .github/rustframe_logo.png target/doc/docs/.github/
-          echo "<meta http-equiv=\"refresh\" content=\"0; url=docs\">" > target/doc/index.html
+          # cp -r .github target/doc/docs
+          cp .github/rustframe_logo.png target/doc/docs/
+          # echo "<meta http-equiv=\"refresh\" content=\"0; url=docs\">" > target/doc/index.html
+          touch target/doc/.nojekyll
+          # copy the benchmark report to the output directory
+          cp -r benchmark-report target/doc/
+
+      - name: Add index.html to output directory
+        run: |
+          cp .github/htmldocs/index.html target/doc/index.html
+          cp .github/rustframe_logo.png target/doc/rustframe_logo.png

       - name: Upload Pages artifact
         # if: github.event_name == 'push' || github.event_name == 'workflow_dispatch'
@@ -115,4 +166,4 @@ jobs:
       - name: Deploy to GitHub Pages
         # if: github.event_name == 'push' || github.event_name == 'workflow_dispatch'
         uses: actions/deploy-pages@v4
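For illustration only (not part of the workflow), the gh/jq artifact lookup in the download step above can be sketched in Python, assuming the requests package and a GITHUB_TOKEN environment variable:

# Hypothetical sketch mirroring the `gh api` + `jq` lookup; not part of the repo.
import os
from typing import Optional

import requests

def latest_benchmark_artifact_url(repo: str) -> Optional[str]:
    resp = requests.get(
        f"https://api.github.com/repos/{repo}/actions/artifacts",
        headers={
            "Accept": "application/vnd.github+json",
            "Authorization": f"Bearer {os.environ['GITHUB_TOKEN']}",
        },
        timeout=30,
    )
    resp.raise_for_status()
    # Take the first artifact whose name matches, as `head -n 1` does above.
    for artifact in resp.json().get("artifacts", []):
        if artifact["name"].startswith("benchmark-reports"):
            return artifact["archive_download_url"]
    return None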


@@ -1,10 +1,10 @@
-name: Run benchmarks
+name: run-benchmarks

 on:
   workflow_dispatch:
-  # push:
-  #   branches:
-  #     - main
+  push:
+    branches:
+      - main

 jobs:
   pick-runner:
@@ -33,8 +33,28 @@ jobs:
         with:
           toolchain: stable

+      - name: Install Python
+        uses: actions/setup-python@v4
+
+      - name: Install uv
+        uses: astral-sh/setup-uv@v5
+
+      - name: Setup venv
+        run: |
+          uv venv
+          uv pip install pandas
+          uv run .github/scripts/custom_benchmark_report.py --dry-run
+
       - name: Run benchmarks
-        run: cargo bench
+        run: cargo bench --features bench
+
+      - name: Generate custom benchmark reports
+        run: |
+          if [ -d ./target/criterion ]; then
+            echo "Found benchmark reports, generating custom report..."
+          else
+            echo "No benchmark reports found, skipping custom report generation."
+            exit 1
+          fi
+          uv run .github/scripts/custom_benchmark_report.py
+
       - name: Upload benchmark reports
         uses: actions/upload-artifact@v4

.gitignore (vendored)

@@ -14,4 +14,6 @@ data/
 .venv/
 .vscode/
 tarpaulin-report.*
+
+.github/htmldocs/rustframe_logo.png


@@ -1,35 +1,22 @@
-// Combined benchmarks for rustframe
+// Combined benchmarks
 use chrono::NaiveDate;
 use criterion::{criterion_group, criterion_main, Criterion};
-// Import Duration for measurement_time and warm_up_time
 use rustframe::{
     frame::{Frame, RowIndex},
-    matrix::{BoolMatrix, Matrix},
+    matrix::{BoolMatrix, Matrix, SeriesOps},
     utils::{BDateFreq, BDatesList},
 };
 use std::time::Duration;

-// You can define a custom Criterion configuration function
-// This will be passed to the criterion_group! macro
-pub fn for_short_runs() -> Criterion {
-    Criterion::default()
-        // (samples != total iterations)
-        // limits the number of statistical data points.
-        .sample_size(50)
-        // measurement time per sample
-        .measurement_time(Duration::from_millis(2000))
-        // reduce warm-up time as well for faster overall run
-        .warm_up_time(Duration::from_millis(50))
-        // You could also make it much shorter if needed, e.g., 50ms measurement, 100ms warm-up
-        // .measurement_time(Duration::from_millis(50))
-        // .warm_up_time(Duration::from_millis(100))
-}
+// Define size categories
+const SIZES_SMALL: [usize; 1] = [1];
+const SIZES_MEDIUM: [usize; 3] = [100, 250, 500];
+const SIZES_LARGE: [usize; 1] = [1000];

-fn bool_matrix_operations_benchmark(c: &mut Criterion) {
-    let sizes = [1, 100, 1000];
-    // let sizes = [1000];
-    for &size in &sizes {
+// Modified benchmark functions to accept a slice of sizes
+fn bool_matrix_operations_benchmark(c: &mut Criterion, sizes: &[usize]) {
+    for &size in sizes {
         let data1: Vec<bool> = (0..size * size).map(|x| x % 2 == 0).collect();
         let data2: Vec<bool> = (0..size * size).map(|x| x % 3 == 0).collect();
         let bm1 = BoolMatrix::from_vec(data1.clone(), size, size);
@@ -61,11 +48,8 @@ fn bool_matrix_operations_benchmark(c: &mut Criterion) {
     }
 }

-fn matrix_boolean_operations_benchmark(c: &mut Criterion) {
-    let sizes = [1, 100, 1000];
-    // let sizes = [1000];
-    for &size in &sizes {
+fn matrix_boolean_operations_benchmark(c: &mut Criterion, sizes: &[usize]) {
+    for &size in sizes {
         let data1: Vec<bool> = (0..size * size).map(|x| x % 2 == 0).collect();
         let data2: Vec<bool> = (0..size * size).map(|x| x % 3 == 0).collect();
         let bm1 = BoolMatrix::from_vec(data1.clone(), size, size);
@@ -97,11 +81,8 @@ fn matrix_boolean_operations_benchmark(c: &mut Criterion) {
     }
 }

-fn matrix_operations_benchmark(c: &mut Criterion) {
-    let sizes = [1, 100, 1000];
-    // let sizes = [1000];
-    for &size in &sizes {
+fn matrix_operations_benchmark(c: &mut Criterion, sizes: &[usize]) {
+    for &size in sizes {
         let data: Vec<f64> = (0..size * size).map(|x| x as f64).collect();
         let ma = Matrix::from_vec(data.clone(), size, size);
@@ -130,8 +111,7 @@ fn matrix_operations_benchmark(c: &mut Criterion) {
         });
     }

-    // Benchmarking matrix addition
-    for &size in &sizes {
+    for &size in sizes {
         let data1: Vec<f64> = (0..size * size).map(|x| x as f64).collect();
         let data2: Vec<f64> = (0..size * size).map(|x| (x + 1) as f64).collect();
         let ma = Matrix::from_vec(data1.clone(), size, size);
@@ -163,44 +143,136 @@ fn matrix_operations_benchmark(c: &mut Criterion) {
     }
 }

-fn benchmark_frame_operations(c: &mut Criterion) {
-    let n_periods = 1000;
-    let n_cols = 1000;
-    let dates: Vec<NaiveDate> =
-        BDatesList::from_n_periods("2024-01-02".to_string(), BDateFreq::Daily, n_periods)
-            .unwrap()
-            .list()
-            .unwrap();
-
-    // let col_names= str(i) for i in range(1, 1000)
-    let col_names: Vec<String> = (1..=n_cols).map(|i| format!("col_{}", i)).collect();
-
-    let data1: Vec<f64> = (0..n_periods * n_cols).map(|x| x as f64).collect();
-    let data2: Vec<f64> = (0..n_periods * n_cols).map(|x| (x + 1) as f64).collect();
-    let ma = Matrix::from_vec(data1.clone(), n_periods, n_cols);
-    let mb = Matrix::from_vec(data2.clone(), n_periods, n_cols);
-    let fa = Frame::new(
-        ma.clone(),
-        col_names.clone(),
-        Some(RowIndex::Date(dates.clone())),
-    );
-    let fb = Frame::new(mb, col_names, Some(RowIndex::Date(dates)));
-
-    c.bench_function("frame element-wise multiply (1000x1000)", |b| {
-        b.iter(|| {
-            let _result = &fa * &fb;
-        });
-    });
-}
+fn generate_frame(size: usize) -> Frame<f64> {
+    let data: Vec<f64> = (0..size * size).map(|x| x as f64).collect();
+    let dates: Vec<NaiveDate> =
+        BDatesList::from_n_periods("2000-01-01".to_string(), BDateFreq::Daily, size)
+            .unwrap()
+            .list()
+            .unwrap();
+    let col_names: Vec<String> = (1..=size).map(|i| format!("col_{}", i)).collect();
+    Frame::new(
+        Matrix::from_vec(data.clone(), size, size),
+        col_names,
+        Some(RowIndex::Date(dates)),
+    )
+}

+fn benchmark_frame_operations(c: &mut Criterion, sizes: &[usize]) {
+    for &size in sizes {
+        let fa = generate_frame(size);
+        let fb = generate_frame(size);
+
+        c.bench_function(&format!("frame add ({}x{})", size, size), |b| {
+            b.iter(|| {
+                let _result = &fa + &fb;
+            });
+        });
+
+        c.bench_function(&format!("frame subtract ({}x{})", size, size), |b| {
+            b.iter(|| {
+                let _result = &fa - &fb;
+            });
+        });
+
+        c.bench_function(&format!("frame multiply ({}x{})", size, size), |b| {
+            b.iter(|| {
+                let _result = &fa * &fb;
+            });
+        });
+
+        c.bench_function(&format!("frame divide ({}x{})", size, size), |b| {
+            b.iter(|| {
+                let _result = &fa / &fb;
+            });
+        });
+
+        c.bench_function(&format!("frame sum_horizontal ({}x{})", size, size), |b| {
+            b.iter(|| {
+                let _result = fa.sum_horizontal();
+            });
+        });
+
+        c.bench_function(&format!("frame sum_vertical ({}x{})", size, size), |b| {
+            b.iter(|| {
+                let _result = fa.sum_vertical();
+            });
+        });
+
+        c.bench_function(&format!("frame prod_horizontal ({}x{})", size, size), |b| {
+            b.iter(|| {
+                let _result = fa.prod_horizontal();
+            });
+        });
+
+        c.bench_function(&format!("frame prod_vertical ({}x{})", size, size), |b| {
+            b.iter(|| {
+                let _result = fa.prod_vertical();
+            });
+        });
+    }
+}

-// Define the criterion group and pass the custom configuration function
+// Runner functions for each size category
+fn run_benchmarks_small(c: &mut Criterion) {
+    bool_matrix_operations_benchmark(c, &SIZES_SMALL);
+    matrix_boolean_operations_benchmark(c, &SIZES_SMALL);
+    matrix_operations_benchmark(c, &SIZES_SMALL);
+    benchmark_frame_operations(c, &SIZES_SMALL);
+}
+
+fn run_benchmarks_medium(c: &mut Criterion) {
+    bool_matrix_operations_benchmark(c, &SIZES_MEDIUM);
+    matrix_boolean_operations_benchmark(c, &SIZES_MEDIUM);
+    matrix_operations_benchmark(c, &SIZES_MEDIUM);
+    benchmark_frame_operations(c, &SIZES_MEDIUM);
+}
+
+fn run_benchmarks_large(c: &mut Criterion) {
+    bool_matrix_operations_benchmark(c, &SIZES_LARGE);
+    matrix_boolean_operations_benchmark(c, &SIZES_LARGE);
+    matrix_operations_benchmark(c, &SIZES_LARGE);
+    benchmark_frame_operations(c, &SIZES_LARGE);
+}
+
+// Configuration functions for different size categories
+fn config_small_arrays() -> Criterion {
+    Criterion::default()
+        .sample_size(500)
+        .measurement_time(Duration::from_millis(100))
+        .warm_up_time(Duration::from_millis(5))
+}
+
+fn config_medium_arrays() -> Criterion {
+    Criterion::default()
+        .sample_size(100)
+        .measurement_time(Duration::from_millis(2000))
+        .warm_up_time(Duration::from_millis(100))
+}
+
+fn config_large_arrays() -> Criterion {
+    Criterion::default()
+        .sample_size(50)
+        .measurement_time(Duration::from_millis(5000))
+        .warm_up_time(Duration::from_millis(200))
+}
+
 criterion_group!(
-    name = combined_benches;
-    config = for_short_runs(); // Use the custom configuration here
-    targets = bool_matrix_operations_benchmark,
-              matrix_boolean_operations_benchmark,
-              matrix_operations_benchmark,
-              benchmark_frame_operations
+    name = benches_small_arrays;
+    config = config_small_arrays();
+    targets = run_benchmarks_small
 );
-criterion_main!(combined_benches);
+criterion_group!(
+    name = benches_medium_arrays;
+    config = config_medium_arrays();
+    targets = run_benchmarks_medium
+);
+criterion_group!(
+    name = benches_large_arrays;
+    config = config_large_arrays();
+    targets = run_benchmarks_large
+);
+criterion_main!(
+    benches_small_arrays,
+    benches_medium_arrays,
+    benches_large_arrays
+);


@@ -2,7 +2,7 @@
 use std::ops::{Add, BitAnd, BitOr, BitXor, Div, Index, IndexMut, Mul, Not, Sub};

-/// A column-major 2D matrix of `T`
+/// A column-major 2D matrix of `T`. Index as `Array(row, column)`.
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub struct Matrix<T> {
     rows: usize,