From 04637ef4d0728bc9328ee255b5d200b2b6b599f5 Mon Sep 17 00:00:00 2001
From: Palash Tyagi <23239946+Magnus167@users.noreply.github.com>
Date: Sun, 6 Jul 2025 17:05:46 +0100
Subject: [PATCH 01/12] Add methods to create zero, one, and filled matrices
 for f64 type

---
 src/matrix/mat.rs | 41 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 41 insertions(+)

diff --git a/src/matrix/mat.rs b/src/matrix/mat.rs
index 64bf6de..bf4265b 100644
--- a/src/matrix/mat.rs
+++ b/src/matrix/mat.rs
@@ -310,6 +310,26 @@ impl Matrix<f64> {
     }
 }
 
+impl Matrix<f64> {
+    /// Creates a new matrix of the specified size, filled with `value`.
+    pub fn filled(rows: usize, cols: usize, value: f64) -> Self {
+        Matrix {
+            rows,
+            cols,
+            data: vec![value; rows * cols], // fill with the specified value
+        }
+    }
+    /// Creates a new matrix of the specified size, filled with zeros.
+    pub fn zeros(rows: usize, cols: usize) -> Self {
+        Matrix::filled(rows, cols, 0.0)
+    }
+
+    /// Creates a new matrix of the specified size, filled with ones.
+    pub fn ones(rows: usize, cols: usize) -> Self {
+        Matrix::filled(rows, cols, 1.0)
+    }
+}
+
 impl<T> Index<(usize, usize)> for Matrix<T> {
     type Output = T;
 
@@ -1794,4 +1814,25 @@ mod tests {
             }
         }
     }
+
+    #[test]
+    fn test_matrix_zeros_ones_filled() {
+        // Test zeros
+        let m = Matrix::<f64>::zeros(2, 3);
+        assert_eq!(m.rows(), 2);
+        assert_eq!(m.cols(), 3);
+        assert_eq!(m.data(), &[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]);
+
+        // Test ones
+        let m = Matrix::<f64>::ones(3, 2);
+        assert_eq!(m.rows(), 3);
+        assert_eq!(m.cols(), 2);
+        assert_eq!(m.data(), &[1.0, 1.0, 1.0, 1.0, 1.0, 1.0]);
+
+        // Test filled
+        let m = Matrix::<f64>::filled(2, 2, 42.5);
+        assert_eq!(m.rows(), 2);
+        assert_eq!(m.cols(), 2);
+        assert_eq!(m.data(), &[42.5, 42.5, 42.5, 42.5]);
+    }
 }
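Usage sketch for the constructors above (editor's example, not part of the patch; uses the crate-internal `crate::matrix` path seen in the diffs):

    use crate::matrix::Matrix;

    let a = Matrix::<f64>::zeros(2, 3);        // 2x3, all 0.0
    let b = Matrix::<f64>::ones(2, 3);         // 2x3, all 1.0
    let c = Matrix::<f64>::filled(2, 3, 42.5); // 2x3, all 42.5
    assert_eq!(a.data().len(), 6);
    assert_eq!(b.data(), &[1.0; 6]);
    assert_eq!(c[(1, 2)], 42.5);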
From f749b2c921e21249a8ee192aed0fc44ace20a60b Mon Sep 17 00:00:00 2001
From: Palash Tyagi <23239946+Magnus167@users.noreply.github.com>
Date: Sun, 6 Jul 2025 17:38:24 +0100
Subject: [PATCH 02/12] Add method to retrieve a specific row from the matrix
 and corresponding tests

---
 src/matrix/mat.rs | 30 ++++++++++++++++++++++++++++++
 1 file changed, 30 insertions(+)

diff --git a/src/matrix/mat.rs b/src/matrix/mat.rs
index bf4265b..dbae21a 100644
--- a/src/matrix/mat.rs
+++ b/src/matrix/mat.rs
@@ -179,6 +179,21 @@ impl<T: Clone> Matrix<T> {
         self.cols -= 1;
     }
 
+    #[inline]
+    pub fn row(&self, r: usize) -> Vec<T> {
+        assert!(
+            r < self.rows,
+            "row index {} out of bounds for {} rows",
+            r,
+            self.rows
+        );
+        let mut row_data = Vec::with_capacity(self.cols);
+        for c in 0..self.cols {
+            row_data.push(self[(r, c)].clone()); // clone each element
+        }
+        row_data
+    }
+
     /// Deletes a row from the matrix. Panics on out-of-bounds.
     /// This is O(N) where N is the number of elements, as it rebuilds the data vec.
     pub fn delete_row(&mut self, row: usize) {
@@ -1130,6 +1145,21 @@ mod tests {
         matrix[(0, 3)] = 99;
     }
 
+    #[test]
+    fn test_row() {
+        let ma = static_test_matrix();
+        assert_eq!(ma.row(0), &[1, 4, 7]);
+        assert_eq!(ma.row(1), &[2, 5, 8]);
+        assert_eq!(ma.row(2), &[3, 6, 9]);
+    }
+
+    #[test]
+    #[should_panic(expected = "row index 3 out of bounds for 3 rows")]
+    fn test_row_out_of_bounds() {
+        let ma = static_test_matrix();
+        ma.row(3);
+    }
+
     #[test]
     fn test_column() {
         let matrix = static_test_matrix_2x4();
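Row-accessor sketch (editor's example, not part of the patch; `test_row` above implies column-major storage, so `from_vec` is assumed to take the data column by column):

    use crate::matrix::Matrix;

    // 2x2 built column-major: column 0 = [1.0, 2.0], column 1 = [10.0, 20.0]
    let m = Matrix::from_vec(vec![1.0, 2.0, 10.0, 20.0], 2, 2);
    assert_eq!(m.row(0), &[1.0, 10.0]);
    assert_eq!(m.row(1), &[2.0, 20.0]);
    // m.row(2) would panic: "row index 2 out of bounds for 2 rows"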
From 6718cf5de74a876a579b8da28f1632122063f7f1 Mon Sep 17 00:00:00 2001
From: Palash Tyagi <23239946+Magnus167@users.noreply.github.com>
Date: Sun, 6 Jul 2025 17:40:04 +0100
Subject: [PATCH 03/12] Add compute module and update lib.rs to include it

---
 src/compute/mod.rs | 3 +++
 src/lib.rs         | 3 +++
 2 files changed, 6 insertions(+)
 create mode 100644 src/compute/mod.rs

diff --git a/src/compute/mod.rs b/src/compute/mod.rs
new file mode 100644
index 0000000..b24f638
--- /dev/null
+++ b/src/compute/mod.rs
@@ -0,0 +1,3 @@
+pub mod activations;
+
+pub mod models;
\ No newline at end of file
diff --git a/src/lib.rs b/src/lib.rs
index a68c8c9..510f36d 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -8,3 +8,6 @@ pub mod frame;
 
 /// Documentation for the [`crate::utils`] module.
 pub mod utils;
+
+/// Documentation for the [`crate::compute`] module.
+pub mod compute;

From dbbf5f96170466e91bcd43b2ea22d72524a9b0b4 Mon Sep 17 00:00:00 2001
From: Palash Tyagi <23239946+Magnus167@users.noreply.github.com>
Date: Sun, 6 Jul 2025 17:40:41 +0100
Subject: [PATCH 04/12] Add activation functions: sigmoid, dsigmoid, relu, and
 drelu

---
 src/compute/activations.rs | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)
 create mode 100644 src/compute/activations.rs

diff --git a/src/compute/activations.rs b/src/compute/activations.rs
new file mode 100644
index 0000000..80b9928
--- /dev/null
+++ b/src/compute/activations.rs
@@ -0,0 +1,18 @@
+use crate::matrix::{Matrix, SeriesOps};
+
+pub fn sigmoid(x: &Matrix<f64>) -> Matrix<f64> {
+    x.map(|v| 1.0 / (1.0 + (-v).exp()))
+}
+
+pub fn dsigmoid(y: &Matrix<f64>) -> Matrix<f64> {
+    // derivative w.r.t. the pre-activation; takes y = sigmoid(x), not x
+    y.map(|v| v * (1.0 - v))
+}
+
+pub fn relu(x: &Matrix<f64>) -> Matrix<f64> {
+    x.map(|v| if v > 0.0 { v } else { 0.0 })
+}
+
+pub fn drelu(x: &Matrix<f64>) -> Matrix<f64> {
+    x.map(|v| if v > 0.0 { 1.0 } else { 0.0 })
+}
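Activation sketch (editor's example, not part of the patch; note that dsigmoid takes the sigmoid output, per the comment in the diff):

    use crate::compute::activations::{dsigmoid, relu, sigmoid};
    use crate::matrix::Matrix;

    let x = Matrix::from_vec(vec![-1.0, 0.0, 2.0], 1, 3);
    let s = sigmoid(&x);   // elementwise 1 / (1 + e^-v)
    let ds = dsigmoid(&s); // s * (1 - s): pass the sigmoid output, not x
    let r = relu(&x);
    assert_eq!(r.data(), &[0.0, 0.0, 2.0]);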
"0.5", features = ["html_reports"], optional = true } - -[dev-dependencies] rand = "^0.9.1" [features] From b1b7e63feacffdb7475aa1c77e921a00c137ca19 Mon Sep 17 00:00:00 2001 From: Palash Tyagi <23239946+Magnus167@users.noreply.github.com> Date: Sun, 6 Jul 2025 17:42:08 +0100 Subject: [PATCH 08/12] Add Dense Neural Network implementation with forward and training methods --- src/compute/models/dense_nn.rs | 65 ++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) create mode 100644 src/compute/models/dense_nn.rs diff --git a/src/compute/models/dense_nn.rs b/src/compute/models/dense_nn.rs new file mode 100644 index 0000000..1959c2a --- /dev/null +++ b/src/compute/models/dense_nn.rs @@ -0,0 +1,65 @@ +use crate::matrix::{Matrix, SeriesOps}; +use crate::compute::activations::{relu, sigmoid, drelu}; +use rand::Rng; + +pub struct DenseNN { + w1: Matrix, // (n_in, n_hidden) + b1: Matrix, // (1, n_hidden) + w2: Matrix, // (n_hidden, n_out) + b2: Matrix, // (1, n_out) +} + +impl DenseNN { + pub fn new(n_in: usize, n_hidden: usize, n_out: usize) -> Self { + let mut rng = rand::rng(); + let mut init = |rows, cols| { + let data = (0..rows * cols) + .map(|_| rng.random_range(-1.0..1.0)) + .collect::>(); + Matrix::from_vec(data, rows, cols) + }; + Self { + w1: init(n_in, n_hidden), + b1: Matrix::zeros(1, n_hidden), + w2: init(n_hidden, n_out), + b2: Matrix::zeros(1, n_out), + } + } + + pub fn forward(&self, x: &Matrix) -> (Matrix, Matrix, Matrix) { + // z1 = X·W1 + b1 ; a1 = ReLU(z1) + let z1 = x.dot(&self.w1) + &self.b1; + let a1 = relu(&z1); + // z2 = a1·W2 + b2 ; a2 = softmax(z2) (here binary => sigmoid) + let z2 = a1.dot(&self.w2) + &self.b2; + let a2 = sigmoid(&z2); // binary output + (a1, z2, a2) // keep intermediates for back-prop + } + + pub fn train(&mut self, x: &Matrix, y: &Matrix, lr: f64, epochs: usize) { + let m = x.rows() as f64; + for _ in 0..epochs { + let (a1, _z2, y_hat) = self.forward(x); + + // -------- backwards ---------- + // dL/da2 = y_hat - y (BCE derivative) + let dz2 = &y_hat - y; // (m, n_out) + let dw2 = a1.transpose().dot(&dz2) / m; // (n_h, n_out) + // let db2 = dz2.sum_vertical() * (1.0 / m); // broadcast ok + let db2 = Matrix::from_vec(dz2.sum_vertical(), 1, dz2.cols()) * (1.0 / m); // (1, n_out) + let da1 = dz2.dot(&self.w2.transpose()); // (m,n_h) + let dz1 = da1.zip(&a1, |g, act| g * drelu(&Matrix::from_cols(vec![vec![act]])).data()[0]); // (m,n_h) + + // real code: drelu returns Matrix, broadcasting needed; you can optimise. 
From be41e9b20ee6c1aee77320aa8e30cd0f7de3ff8d Mon Sep 17 00:00:00 2001
From: Palash Tyagi <23239946+Magnus167@users.noreply.github.com>
Date: Sun, 6 Jul 2025 17:41:14 +0100
Subject: [PATCH 06/12] Add logistic regression model implementation

---
 src/compute/models/logreg.rs | 36 ++++++++++++++++++++++++++++++++++++
 1 file changed, 36 insertions(+)
 create mode 100644 src/compute/models/logreg.rs

diff --git a/src/compute/models/logreg.rs b/src/compute/models/logreg.rs
new file mode 100644
index 0000000..1bdaa86
--- /dev/null
+++ b/src/compute/models/logreg.rs
@@ -0,0 +1,36 @@
+use crate::matrix::{Matrix, SeriesOps};
+use crate::compute::activations::sigmoid;
+
+pub struct LogReg {
+    w: Matrix<f64>,
+    b: f64,
+}
+
+impl LogReg {
+    pub fn new(n_features: usize) -> Self {
+        Self {
+            w: Matrix::zeros(n_features, 1),
+            b: 0.0,
+        }
+    }
+
+    pub fn predict_proba(&self, x: &Matrix<f64>) -> Matrix<f64> {
+        sigmoid(&(x.dot(&self.w) + self.b)) // σ(Xw + b)
+    }
+
+    pub fn fit(&mut self, x: &Matrix<f64>, y: &Matrix<f64>, lr: f64, epochs: usize) {
+        let m = x.rows() as f64;
+        for _ in 0..epochs {
+            let p = self.predict_proba(x); // shape (m, 1)
+            let err = &p - y; // derivative of BCE w.r.t. the pre-sigmoid logits
+            let grad_w = x.transpose().dot(&err) / m;
+            let grad_b = err.sum_vertical().iter().sum::<f64>() / m;
+            self.w = &self.w - &(grad_w * lr);
+            self.b -= lr * grad_b;
+        }
+    }
+
+    pub fn predict(&self, x: &Matrix<f64>) -> Matrix<f64> {
+        self.predict_proba(x).map(|p| if p >= 0.5 { 1.0 } else { 0.0 })
+    }
+}
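The same pattern for `LogReg` on a linearly separable toy set (editor's example, not part of the patch; hyper-parameters are illustrative):

    use crate::compute::models::logreg::LogReg;
    use crate::matrix::Matrix;

    let x = Matrix::from_vec(vec![-2.0, -1.0, 1.0, 2.0], 4, 1);
    let y = Matrix::from_vec(vec![0.0, 0.0, 1.0, 1.0], 4, 1);
    let mut clf = LogReg::new(1);
    clf.fit(&x, &y, 0.5, 1_000);
    let labels = clf.predict(&x); // thresholded at p >= 0.5
    assert_eq!(labels.data(), &[0.0, 0.0, 1.0, 1.0]);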
From e2c5e65c18a94306f4aaa68339a4b9929198b0ea Mon Sep 17 00:00:00 2001
From: Palash Tyagi <23239946+Magnus167@users.noreply.github.com>
Date: Sun, 6 Jul 2025 17:41:56 +0100
Subject: [PATCH 07/12] move rand from dev-deps to deps

---
 Cargo.toml | 2 --
 1 file changed, 2 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index d3d44f8..ec38c65 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -14,8 +14,6 @@ crate-type = ["cdylib", "lib"]
 [dependencies]
 chrono = "^0.4.10"
 criterion = { version = "0.5", features = ["html_reports"], optional = true }
-
-[dev-dependencies]
 rand = "^0.9.1"
 
 [features]

From b1b7e63feacffdb7475aa1c77e921a00c137ca19 Mon Sep 17 00:00:00 2001
From: Palash Tyagi <23239946+Magnus167@users.noreply.github.com>
Date: Sun, 6 Jul 2025 17:42:08 +0100
Subject: [PATCH 08/12] Add Dense Neural Network implementation with forward
 and training methods

---
 src/compute/models/dense_nn.rs | 64 ++++++++++++++++++++++++++++++++++
 1 file changed, 64 insertions(+)
 create mode 100644 src/compute/models/dense_nn.rs

diff --git a/src/compute/models/dense_nn.rs b/src/compute/models/dense_nn.rs
new file mode 100644
index 0000000..1959c2a
--- /dev/null
+++ b/src/compute/models/dense_nn.rs
@@ -0,0 +1,64 @@
+use crate::matrix::{Matrix, SeriesOps};
+use crate::compute::activations::{relu, sigmoid};
+use rand::Rng;
+
+pub struct DenseNN {
+    w1: Matrix<f64>, // (n_in, n_hidden)
+    b1: Matrix<f64>, // (1, n_hidden)
+    w2: Matrix<f64>, // (n_hidden, n_out)
+    b2: Matrix<f64>, // (1, n_out)
+}
+
+impl DenseNN {
+    pub fn new(n_in: usize, n_hidden: usize, n_out: usize) -> Self {
+        let mut rng = rand::rng();
+        let mut init = |rows, cols| {
+            let data = (0..rows * cols)
+                .map(|_| rng.random_range(-1.0..1.0))
+                .collect::<Vec<f64>>();
+            Matrix::from_vec(data, rows, cols)
+        };
+        Self {
+            w1: init(n_in, n_hidden),
+            b1: Matrix::zeros(1, n_hidden),
+            w2: init(n_hidden, n_out),
+            b2: Matrix::zeros(1, n_out),
+        }
+    }
+
+    pub fn forward(&self, x: &Matrix<f64>) -> (Matrix<f64>, Matrix<f64>, Matrix<f64>) {
+        // z1 = X·W1 + b1 ; a1 = ReLU(z1)
+        let z1 = x.dot(&self.w1) + &self.b1;
+        let a1 = relu(&z1);
+        // z2 = a1·W2 + b2 ; a2 = sigmoid(z2) (binary output; use softmax for multi-class)
+        let z2 = a1.dot(&self.w2) + &self.b2;
+        let a2 = sigmoid(&z2);
+        (a1, z2, a2) // keep intermediates for back-prop
+    }
+
+    pub fn train(&mut self, x: &Matrix<f64>, y: &Matrix<f64>, lr: f64, epochs: usize) {
+        let m = x.rows() as f64;
+        for _ in 0..epochs {
+            let (a1, _z2, y_hat) = self.forward(x);
+
+            // -------- backwards ----------
+            // dL/dz2 = y_hat - y (BCE derivative through the sigmoid)
+            let dz2 = &y_hat - y; // (m, n_out)
+            let dw2 = a1.transpose().dot(&dz2) / m; // (n_h, n_out)
+            let db2 = Matrix::from_vec(dz2.sum_vertical(), 1, dz2.cols()) * (1.0 / m); // (1, n_out)
+            let da1 = dz2.dot(&self.w2.transpose()); // (m, n_h)
+            // ReLU' gates the gradient: 1 where the unit was active, 0 elsewhere
+            // (a1 > 0 exactly where z1 > 0, so testing a1 is equivalent to drelu(z1))
+            let dz1 = da1.zip(&a1, |g, act| if act > 0.0 { g } else { 0.0 }); // (m, n_h)
+
+            let dw1 = x.transpose().dot(&dz1) / m; // (n_in, n_h)
+            let db1 = Matrix::from_vec(dz1.sum_vertical(), 1, dz1.cols()) * (1.0 / m); // (1, n_h)
+
+            // -------- update ----------
+            self.w2 = &self.w2 - &(dw2 * lr);
+            self.b2 = &self.b2 - &(db2 * lr);
+            self.w1 = &self.w1 - &(dw1 * lr);
+            self.b1 = &self.b1 - &(db1 * lr);
+        }
+    }
+}
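A training sketch for `DenseNN` on XOR (editor's example, not part of the patch; convergence from random init is likely but not guaranteed, and the layer size and hyper-parameters are illustrative):

    use crate::compute::models::dense_nn::DenseNN;
    use crate::matrix::Matrix;

    // 4 samples, 2 features, column-major: feature 1 column, then feature 2 column
    let x = Matrix::from_vec(vec![0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0], 4, 2);
    let y = Matrix::from_vec(vec![0.0, 1.0, 1.0, 0.0], 4, 1);
    let mut nn = DenseNN::new(2, 8, 1);
    nn.train(&x, &y, 0.5, 5_000);
    let (_a1, _z2, probs) = nn.forward(&x); // sigmoid outputs in (0, 1)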
From b6645fcfbdea9a12087565b757f8528e23d1b2d0 Mon Sep 17 00:00:00 2001
From: Palash Tyagi <23239946+Magnus167@users.noreply.github.com>
Date: Sun, 6 Jul 2025 17:42:45 +0100
Subject: [PATCH 09/12] Add Gaussian Naive Bayes implementation with fit and
 predict methods

---
 src/compute/models/k_means.rs | 105 ++++++++++++++++++++++++++++++++++
 1 file changed, 105 insertions(+)
 create mode 100644 src/compute/models/k_means.rs

diff --git a/src/compute/models/k_means.rs b/src/compute/models/k_means.rs
new file mode 100644
index 0000000..716114f
--- /dev/null
+++ b/src/compute/models/k_means.rs
@@ -0,0 +1,105 @@
+use crate::matrix::Matrix;
+use std::collections::HashMap;
+
+pub struct GaussianNB {
+    classes: Vec<f64>, // distinct labels
+    priors: Vec<f64>,  // P(class)
+    means: Vec<Matrix<f64>>,
+    variances: Vec<Matrix<f64>>,
+    eps: f64, // var-smoothing
+}
+
+impl GaussianNB {
+    pub fn new(var_smoothing: f64) -> Self {
+        Self {
+            classes: vec![],
+            priors: vec![],
+            means: vec![],
+            variances: vec![],
+            eps: var_smoothing,
+        }
+    }
+
+    pub fn fit(&mut self, x: &Matrix<f64>, y: &Matrix<f64>) {
+        let m = x.rows();
+        let n = x.cols();
+        assert_eq!(y.rows(), m);
+        assert_eq!(y.cols(), 1);
+
+        // ----- group samples by label -----
+        let mut groups: HashMap<i64, Vec<usize>> = HashMap::new();
+        for i in 0..m {
+            groups.entry(y[(i, 0)] as i64).or_default().push(i);
+        }
+
+        self.classes = groups.keys().cloned().map(|v| v as f64).collect::<Vec<f64>>();
+        self.classes.sort_by(|a, b| a.partial_cmp(b).unwrap());
+
+        self.priors.clear();
+        self.means.clear();
+        self.variances.clear();
+
+        for &c in &self.classes {
+            let idx = &groups[&(c as i64)];
+            let count = idx.len();
+            self.priors.push(count as f64 / m as f64);
+
+            let mut mean = Matrix::zeros(1, n);
+            let mut var = Matrix::zeros(1, n);
+
+            // mean
+            for &i in idx {
+                for j in 0..n {
+                    mean[(0, j)] += x[(i, j)];
+                }
+            }
+            for j in 0..n {
+                mean[(0, j)] /= count as f64;
+            }
+
+            // variance
+            for &i in idx {
+                for j in 0..n {
+                    let d = x[(i, j)] - mean[(0, j)];
+                    var[(0, j)] += d * d;
+                }
+            }
+            for j in 0..n {
+                var[(0, j)] = var[(0, j)] / count as f64 + self.eps;
+            }
+
+            self.means.push(mean);
+            self.variances.push(var);
+        }
+    }
+
+    /// Returns class labels (shape m×1) for the samples in X.
+    pub fn predict(&self, x: &Matrix<f64>) -> Matrix<f64> {
+        let m = x.rows();
+        let k = self.classes.len();
+        let n = x.cols();
+        let mut preds = Matrix::zeros(m, 1);
+        let ln_2pi = (2.0 * std::f64::consts::PI).ln();
+
+        for i in 0..m {
+            let mut best_class = 0usize;
+            let mut best_log_prob = f64::NEG_INFINITY;
+            for c in 0..k {
+                // log P(y=c) + Σ log N(x_j | μ, σ²)
+                let mut log_prob = self.priors[c].ln();
+                for j in 0..n {
+                    let mean = self.means[c][(0, j)];
+                    let var = self.variances[c][(0, j)];
+                    let diff = x[(i, j)] - mean;
+                    log_prob += -0.5 * (diff * diff / var + var.ln() + ln_2pi);
+                }
+                if log_prob > best_log_prob {
+                    best_log_prob = log_prob;
+                    best_class = c;
+                }
+            }
+            preds[(i, 0)] = self.classes[best_class];
+        }
+        preds
+    }
+}

From d4c0f174b16e4aa2df01833e2d1fadec5bf0544f Mon Sep 17 00:00:00 2001
From: Palash Tyagi <23239946+Magnus167@users.noreply.github.com>
Date: Sun, 6 Jul 2025 17:42:56 +0100
Subject: [PATCH 10/12] Add PCA implementation with fit and transform methods

---
 src/compute/models/pca.rs | 83 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 83 insertions(+)
 create mode 100644 src/compute/models/pca.rs

diff --git a/src/compute/models/pca.rs b/src/compute/models/pca.rs
new file mode 100644
index 0000000..1ede2d9
--- /dev/null
+++ b/src/compute/models/pca.rs
@@ -0,0 +1,83 @@
+use crate::matrix::{Matrix, SeriesOps};
+use rand;
+
+/// Holds the `n_components` principal axes (rows) and the centred data's mean.
+pub struct PCA {
+    pub components: Matrix<f64>, // (n_components, n_features)
+    pub mean: Matrix<f64>,       // (1, n_features)
+}
+
+impl PCA {
+    pub fn fit(x: &Matrix<f64>, n_components: usize, iters: usize) -> Self {
+        let m = x.rows();
+        let n = x.cols();
+        assert!(n_components <= n);
+
+        // ----- centre data -----
+        let mean_vec = {
+            let mut v = Matrix::zeros(1, n);
+            for j in 0..n {
+                let mut s = 0.0;
+                for i in 0..m {
+                    s += x[(i, j)];
+                }
+                v[(0, j)] = s / m as f64;
+            }
+            v
+        };
+        let x_centered = x - &mean_vec;
+
+        // ----- covariance matrix C = Xᵀ·X / (m-1) -----
+        let cov = x_centered.transpose().dot(&x_centered) * (1.0 / (m as f64 - 1.0));
+
+        // ----- power iteration to find the top eigenvectors -----
+        let mut comp = Matrix::zeros(n_components, n);
+        let mut b = Matrix::zeros(1, n); // current vector
+        for c in 0..n_components {
+            // random initial vector
+            for j in 0..n {
+                b[(0, j)] = rand::random::<f64>() - 0.5;
+            }
+            // subtract projections on previously found components
+            for prev in 0..c {
+                let proj = b.dot(&Matrix::from_vec(comp.row(prev), 1, n).transpose())[(0, 0)];
+                // subtract the projection to maintain orthogonality
+                for j in 0..n {
+                    b[(0, j)] -= proj * comp[(prev, j)];
+                }
+            }
+            // iterate
+            for _ in 0..iters {
+                // b = C·bᵀ
+                let mut nb = cov.dot(&b.transpose()).transpose();
+                // subtract projections again to maintain orthogonality
+                for prev in 0..c {
+                    let proj = nb.dot(&Matrix::from_vec(comp.row(prev), 1, n).transpose())[(0, 0)];
+                    for j in 0..n {
+                        nb[(0, j)] -= proj * comp[(prev, j)];
+                    }
+                }
+                // normalise
+                let norm = nb.data().iter().map(|v| v * v).sum::<f64>().sqrt();
+                for j in 0..n {
+                    nb[(0, j)] /= norm;
+                }
+                b = nb;
+            }
+            // store component
+            for j in 0..n {
+                comp[(c, j)] = b[(0, j)];
+            }
+        }
+        Self {
+            components: comp,
+            mean: mean_vec,
+        }
+    }
+
+    /// Projects new data onto the learned axes.
+    pub fn transform(&self, x: &Matrix<f64>) -> Matrix<f64> {
+        let x_centered = x - &self.mean;
+        x_centered.dot(&self.components.transpose())
+    }
+}
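A projection sketch for `PCA` (editor's example, not part of the patch; `iters` is the power-iteration count per component):

    use crate::compute::models::pca::PCA;
    use crate::matrix::Matrix;

    // 4 points near the line y = x (column-major: x-column, then y-column)
    let data = Matrix::from_vec(vec![1.0, 2.0, 3.0, 4.0, 1.1, 1.9, 3.2, 3.9], 4, 2);
    let pca = PCA::fit(&data, 1, 100);
    let projected = pca.transform(&data); // (4, 1) coordinates along the first axis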
From eb948c1f49e7cc208aad293d2f338da282ede664 Mon Sep 17 00:00:00 2001
From: Palash Tyagi <23239946+Magnus167@users.noreply.github.com>
Date: Sun, 6 Jul 2025 17:43:04 +0100
Subject: [PATCH 11/12] Add Gaussian Naive Bayes implementation with fit and
 predict methods

---
 src/compute/models/gaussian_nb.rs | 124 ++++++++++++++++++++++++++++++
 1 file changed, 124 insertions(+)
 create mode 100644 src/compute/models/gaussian_nb.rs

diff --git a/src/compute/models/gaussian_nb.rs b/src/compute/models/gaussian_nb.rs
new file mode 100644
index 0000000..b6cd659
--- /dev/null
+++ b/src/compute/models/gaussian_nb.rs
@@ -0,0 +1,124 @@
+use crate::matrix::Matrix;
+use std::collections::HashMap;
+
+pub struct GaussianNB {
+    classes: Vec<f64>, // distinct labels
+    priors: Vec<f64>,  // P(class)
+    means: Vec<Matrix<f64>>,
+    variances: Vec<Matrix<f64>>,
+    eps: f64, // var-smoothing
+}
+
+impl GaussianNB {
+    pub fn new(var_smoothing: f64) -> Self {
+        Self {
+            classes: vec![],
+            priors: vec![],
+            means: vec![],
+            variances: vec![],
+            eps: var_smoothing,
+        }
+    }
+
+    pub fn fit(&mut self, x: &Matrix<f64>, y: &Matrix<f64>) {
+        let m = x.rows();
+        let n = x.cols();
+        assert_eq!(y.rows(), m);
+        assert_eq!(y.cols(), 1);
+        if m == 0 || n == 0 {
+            panic!("Input matrix x or y is empty");
+        }
+
+        // ----- group samples by label (keyed by bit pattern, since f64 is not Hash) -----
+        let mut groups: HashMap<u64, Vec<usize>> = HashMap::new();
+        for i in 0..m {
+            let label_bits = y[(i, 0)].to_bits();
+            groups.entry(label_bits).or_default().push(i);
+        }
+        if groups.is_empty() {
+            panic!("No class labels found in y");
+        }
+
+        self.classes = groups
+            .keys()
+            .cloned()
+            .map(f64::from_bits)
+            .collect::<Vec<f64>>();
+        // Note: if NaN is present in the class labels, this comparison panics; labels must be valid floats.
+        self.classes.sort_by(|a, b| a.partial_cmp(b).unwrap());
+
+        self.priors.clear();
+        self.means.clear();
+        self.variances.clear();
+
+        for &c in &self.classes {
+            let label_bits = c.to_bits();
+            let idx = &groups[&label_bits];
+            let count = idx.len();
+            if count == 0 {
+                panic!("Class group for label {c} is empty");
+            }
+            self.priors.push(count as f64 / m as f64);
+
+            let mut mean = Matrix::zeros(1, n);
+            let mut var = Matrix::zeros(1, n);
+
+            // mean
+            for &i in idx {
+                for j in 0..n {
+                    mean[(0, j)] += x[(i, j)];
+                }
+            }
+            for j in 0..n {
+                mean[(0, j)] /= count as f64;
+            }
+
+            // variance
+            for &i in idx {
+                for j in 0..n {
+                    let d = x[(i, j)] - mean[(0, j)];
+                    var[(0, j)] += d * d;
+                }
+            }
+            for j in 0..n {
+                var[(0, j)] = var[(0, j)] / count as f64 + self.eps; // always add eps after division
+                if var[(0, j)] <= 0.0 {
+                    var[(0, j)] = self.eps; // ensure strictly positive variance
+                }
+            }
+
+            self.means.push(mean);
+            self.variances.push(var);
+        }
+    }
+
+    /// Returns class labels (shape m×1) for the samples in X.
+    pub fn predict(&self, x: &Matrix<f64>) -> Matrix<f64> {
+        let m = x.rows();
+        let k = self.classes.len();
+        let n = x.cols();
+        let mut preds = Matrix::zeros(m, 1);
+        let ln_2pi = (2.0 * std::f64::consts::PI).ln();
+
+        for i in 0..m {
+            let mut best_class = 0usize;
+            let mut best_log_prob = f64::NEG_INFINITY;
+            for c in 0..k {
+                // log P(y=c) + Σ log N(x_j | μ, σ²)
+                let mut log_prob = self.priors[c].ln();
+                for j in 0..n {
+                    let mean = self.means[c][(0, j)];
+                    let var = self.variances[c][(0, j)];
+                    let diff = x[(i, j)] - mean;
+                    log_prob += -0.5 * (diff * diff / var + var.ln() + ln_2pi);
+                }
+                if log_prob > best_log_prob {
+                    best_log_prob = log_prob;
+                    best_class = c;
+                }
+            }
+            preds[(i, 0)] = self.classes[best_class];
+        }
+        preds
+    }
+}

From b279131503c928158de4991aa9ac7c12d6760efd Mon Sep 17 00:00:00 2001
From: Palash Tyagi <23239946+Magnus167@users.noreply.github.com>
Date: Sun, 6 Jul 2025 17:43:17 +0100
Subject: [PATCH 12/12] Add model modules for linear regression, logistic
 regression, dense neural network, k-means, PCA, and Gaussian Naive Bayes

---
 src/compute/models/mod.rs | 6 ++++++
 1 file changed, 6 insertions(+)
 create mode 100644 src/compute/models/mod.rs

diff --git a/src/compute/models/mod.rs b/src/compute/models/mod.rs
new file mode 100644
index 0000000..c617f08
--- /dev/null
+++ b/src/compute/models/mod.rs
@@ -0,0 +1,6 @@
+pub mod linreg;
+pub mod logreg;
+pub mod dense_nn;
+pub mod k_means;
+pub mod pca;
+pub mod gaussian_nb;
\ No newline at end of file
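Finally, a classification sketch for `GaussianNB` (editor's example, not part of the series; shown against the `gaussian_nb` module above, whose bit-pattern grouping handles arbitrary f64 labels; `1e-9` mirrors scikit-learn's default var_smoothing):

    use crate::compute::models::gaussian_nb::GaussianNB;
    use crate::matrix::Matrix;

    // two well-separated 1-D classes labelled 0.0 and 1.0
    let x = Matrix::from_vec(vec![-2.1, -1.9, 1.9, 2.1], 4, 1);
    let y = Matrix::from_vec(vec![0.0, 0.0, 1.0, 1.0], 4, 1);
    let mut nb = GaussianNB::new(1e-9);
    nb.fit(&x, &y);
    let preds = nb.predict(&x);
    assert_eq!(preds.data(), &[0.0, 0.0, 1.0, 1.0]);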