Compare commits

...

6 Commits

Author SHA1 Message Date
Palash Tyagi
5c3862c297 refactor: reorganize module exports for clarity 2025-04-14 00:25:23 +01:00
Palash Tyagi
3559a90ad2 adding blacklist ability 2025-04-14 00:25:09 +01:00
Palash Tyagi
b7368a366e add weight_xcat capability 2025-04-14 00:24:26 +01:00
Palash Tyagi
24a4176e17 fix: correct import statement for reduce_dataframe in historic_vol.rs 2025-04-14 00:24:02 +01:00
Palash Tyagi
165e1c19e4 updating linear composite binding 2025-04-14 00:22:59 +01:00
Palash Tyagi
fefe849394 updating notebook 2025-04-14 00:22:38 +01:00
6 changed files with 390 additions and 150 deletions

File diff suppressed because one or more lines are too long

View File

@ -68,7 +68,7 @@ pub fn get_period_indices_hv(dfw: PyDataFrame, est_freq: &str) -> PyResult<Vec<u
cids, cids,
weights = None, weights = None,
signs = None, signs = None,
weight_xcats = None, weight_xcat = None,
normalize_weights = false, normalize_weights = false,
start = None, start = None,
end = None, end = None,
@ -84,7 +84,7 @@ pub fn linear_composite(
cids: Vec<String>, cids: Vec<String>,
weights: Option<Vec<f64>>, weights: Option<Vec<f64>>,
signs: Option<Vec<f64>>, signs: Option<Vec<f64>>,
weight_xcats: Option<Vec<String>>, weight_xcat: Option<String>,
normalize_weights: bool, normalize_weights: bool,
start: Option<String>, start: Option<String>,
end: Option<String>, end: Option<String>,
@ -101,7 +101,7 @@ pub fn linear_composite(
cids, cids,
weights, weights,
signs, signs,
weight_xcats, weight_xcat,
normalize_weights, normalize_weights,
start, start,
end, end,

View File

@ -1,6 +1,6 @@
use crate::utils::dateutils::{get_bdates_from_col, get_min_max_real_dates}; use crate::utils::dateutils::{get_bdates_from_col, get_min_max_real_dates};
use crate::utils::qdf::pivots::*; use crate::utils::qdf::pivots::*;
use crate::utils::qdf::reduce_df::*; use crate::utils::qdf::reduce_dataframe;
use chrono::NaiveDate; use chrono::NaiveDate;
use ndarray::{s, Array, Array1, Zip}; use ndarray::{s, Array, Array1, Zip};
use polars::prelude::*; use polars::prelude::*;

View File

@ -1,6 +1,6 @@
use crate::utils::qdf::check_quantamental_dataframe; use crate::utils::qdf::check_quantamental_dataframe;
use crate::utils::qdf::pivots::*; use crate::utils::qdf::pivots::{pivot_dataframe_by_ticker, pivot_wide_dataframe_to_qdf};
use crate::utils::qdf::reduce_df::*; use crate::utils::qdf::reduce_df::reduce_dataframe;
use polars::prelude::*; use polars::prelude::*;
use std::collections::HashMap; use std::collections::HashMap;
const TOLERANCE: f64 = 1e-8; const TOLERANCE: f64 = 1e-8;
@ -108,14 +108,42 @@ fn _form_agg_nan_mask_series(nan_mask_dfw: &DataFrame) -> Result<Series, PolarsE
Ok(combined.into_series()) Ok(combined.into_series())
} }
/// Form the weights DataFrame
fn _form_agg_weights_dfw( fn _form_agg_weights_dfw(
agg_weights_map: &HashMap<String, Vec<f64>>, agg_weights_map: &HashMap<String, (WeightValue, f64)>,
data_dfw: DataFrame, dfw: &DataFrame,
) -> Result<DataFrame, PolarsError> { ) -> Result<DataFrame, PolarsError> {
let mut weights_dfw = DataFrame::new(vec![])?; let mut weights_dfw = DataFrame::new(vec![])?;
for (agg_targ, weight_signs) in agg_weights_map.iter() { for (agg_targ, weight_signs) in agg_weights_map.iter() {
let wgt = weight_signs[0] * weight_signs[1]; // let wgt = weight_signs[0] * weight_signs[1];
let wgt_series = Series::new(agg_targ.into(), vec![wgt; data_dfw.height()]); let wgt_series = match &weight_signs.0 {
WeightValue::F64(val) => {
let wgt = val * weight_signs.1;
Series::new(agg_targ.into(), vec![wgt; dfw.height()])
}
WeightValue::Str(vstr) => {
// look up the vstr column in dfw; otherwise raise a weight specification error
if !dfw.get_column_names().contains(&&PlSmallStr::from(vstr)) {
return Err(PolarsError::ComputeError(
format!(
"The column {} does not exist in the DataFrame. {:?}",
vstr, agg_weights_map
)
.into(),
));
}
let vstr_series = dfw.column(vstr)?;
let multiplied_series = vstr_series * weight_signs.1;
let mut multiplied_series =
multiplied_series.as_series().cloned().ok_or_else(|| {
PolarsError::ComputeError(
"Failed to convert multiplied_series to Series".into(),
)
})?;
multiplied_series.rename(agg_targ.into());
multiplied_series
}
};
weights_dfw.with_column(wgt_series)?; weights_dfw.with_column(wgt_series)?;
} }
Ok(weights_dfw) Ok(weights_dfw)
@ -143,14 +171,14 @@ fn perform_single_group_agg(
dfw: &DataFrame, dfw: &DataFrame,
agg_on: &String, agg_on: &String,
agg_targs: &Vec<String>, agg_targs: &Vec<String>,
agg_weights_map: &HashMap<String, Vec<f64>>, agg_weights_map: &HashMap<String, (WeightValue, f64)>,
normalize_weights: bool, normalize_weights: bool,
complete: bool, complete: bool,
) -> Result<Column, PolarsError> { ) -> Result<Column, PolarsError> {
let data_dfw = _form_agg_data_dfw(dfw, agg_targs)?; let data_dfw = _form_agg_data_dfw(dfw, agg_targs)?;
let nan_mask_dfw = _form_agg_nan_mask_dfw(&data_dfw)?; let nan_mask_dfw = _form_agg_nan_mask_dfw(&data_dfw)?;
let nan_mask_series = _form_agg_nan_mask_series(&nan_mask_dfw)?; let nan_mask_series = _form_agg_nan_mask_series(&nan_mask_dfw)?;
let weights_dfw = _form_agg_weights_dfw(agg_weights_map, data_dfw.clone())?; let weights_dfw = _form_agg_weights_dfw(agg_weights_map, dfw)?;
let weights_dfw = match normalize_weights { let weights_dfw = match normalize_weights {
true => normalize_weights_with_nan_mask(weights_dfw, nan_mask_dfw)?, true => normalize_weights_with_nan_mask(weights_dfw, nan_mask_dfw)?,
false => weights_dfw, false => weights_dfw,
@ -192,7 +220,7 @@ fn perform_single_group_agg(
fn perform_multiplication( fn perform_multiplication(
dfw: &DataFrame, dfw: &DataFrame,
mult_targets: &HashMap<String, Vec<String>>, mult_targets: &HashMap<String, Vec<String>>,
weights_map: &HashMap<String, HashMap<String, Vec<f64>>>, weights_map: &HashMap<String, HashMap<String, (WeightValue, f64)>>,
complete: bool, complete: bool,
normalize_weights: bool, normalize_weights: bool,
) -> Result<DataFrame, PolarsError> { ) -> Result<DataFrame, PolarsError> {
@ -200,6 +228,7 @@ fn perform_multiplication(
// let mut new_dfw = DataFrame::new(vec![real_date])?; // let mut new_dfw = DataFrame::new(vec![real_date])?;
let mut new_dfw = DataFrame::new(vec![])?; let mut new_dfw = DataFrame::new(vec![])?;
assert!(!mult_targets.is_empty(), "agg_targs is empty"); assert!(!mult_targets.is_empty(), "agg_targs is empty");
for (agg_on, agg_targs) in mult_targets.iter() { for (agg_on, agg_targs) in mult_targets.iter() {
// perform_single_group_agg // perform_single_group_agg
let cols_len = new_dfw.get_column_names().len(); let cols_len = new_dfw.get_column_names().len();
@ -288,76 +317,122 @@ fn get_mul_targets(
Ok(mul_targets) Ok(mul_targets)
} }
/// Builds a map of the shape:
/// `HashMap<String, HashMap<String, (WeightValue, f64)>>`
/// where only one of `weights` or `weight_xcat` can be provided.
/// If neither is provided, weights default to 1.0.
/// Each tuple is `(WeightValue, f64) = (weight, sign)`.
fn form_weights_and_signs_map( fn form_weights_and_signs_map(
cids: Vec<String>, cids: Vec<String>,
xcats: Vec<String>, xcats: Vec<String>,
weights: Option<Vec<f64>>, weights: Option<Vec<f64>>,
weight_xcat: Option<String>,
signs: Option<Vec<f64>>, signs: Option<Vec<f64>>,
) -> Result<HashMap<String, HashMap<String, Vec<f64>>>, Box<dyn std::error::Error>> { ) -> Result<HashMap<String, HashMap<String, (WeightValue, f64)>>, Box<dyn std::error::Error>> {
let _agg_xcats_for_cid = agg_xcats_for_cid(cids.clone(), xcats.clone()); // Determine the aggregation orientation and targets via the helper functions:
let agg_xcats_for_cid = agg_xcats_for_cid(cids.clone(), xcats.clone());
let (agg_on, agg_targ) = get_agg_on_agg_targs(cids.clone(), xcats.clone()); let (agg_on, agg_targ) = get_agg_on_agg_targs(cids.clone(), xcats.clone());
// if weights are None, create a vector of 1s of the same length as agg_targ // Determine if each weight option has non-empty values.
let weights = weights.unwrap_or(vec![1.0 / agg_targ.len() as f64; agg_targ.len()]); let weights_provided = weights.as_ref().map_or(false, |v| !v.is_empty());
let signs = signs.unwrap_or(vec![1.0; agg_targ.len()]); let weight_xcats_provided = weight_xcat.as_ref().map_or(false, |v| !v.is_empty());
// check that the lengths of weights and signs match the length of agg_targ // Enforce that only one of weights or weight_xcats is specified.
check_weights_signs_lengths( if weights_provided && weight_xcats_provided {
weights.clone(), return Err("Only one of `weights` and `weight_xcats` may be specified.".into());
signs.clone(), }
_agg_xcats_for_cid,
agg_targ.len(),
)?;
let mut weights_map = HashMap::new(); // 1) Build the "actual_weights" vector as WeightValue.
let actual_weights: Vec<WeightValue> = if weights_provided {
weights.unwrap().into_iter().map(WeightValue::F64).collect()
} else if weight_xcats_provided {
vec![WeightValue::Str(weight_xcat.unwrap()); agg_targ.len()]
} else {
// Default to numeric 1.0 if neither is provided
vec![WeightValue::F64(1.0); agg_targ.len()]
};
// 2) Build the "signs" vector; default to 1.0 if not provided
let signs = signs.unwrap_or_else(|| vec![1.0; agg_targ.len()]);
// 3) Optional: check lengths & zero values (only numeric weights).
check_weights_signs_lengths(&actual_weights, &signs, agg_xcats_for_cid, agg_targ.len())?;
// 4) Build the final nested HashMap
let mut weights_map: HashMap<String, HashMap<String, (WeightValue, f64)>> = HashMap::new();
for agg_o in agg_on { for agg_o in agg_on {
let mut agg_t_map = HashMap::new(); let mut agg_t_map = HashMap::new();
for (i, agg_t) in agg_targ.iter().enumerate() { for (i, agg_t) in agg_targ.iter().enumerate() {
let ticker = match _agg_xcats_for_cid { // Format the ticker
true => format!("{}_{}", agg_o, agg_t), let ticker = if agg_xcats_for_cid {
false => format!("{}_{}", agg_t, agg_o), format!("{}_{}", agg_o, agg_t)
} else {
format!("{}_{}", agg_t, agg_o)
}; };
let weight_signs = vec![weights[i], signs[i]]; // Build the tuple (WeightValue, f64)
agg_t_map.insert(ticker, weight_signs); let weight_sign_tuple = match &actual_weights[i] {
WeightValue::F64(val) => (WeightValue::F64(*val).clone(), signs[i]),
WeightValue::Str(vstr) => {
let new_str = format!("{}_{}", agg_t, vstr);
(WeightValue::Str(new_str), signs[i])
}
};
agg_t_map.insert(ticker, weight_sign_tuple);
} }
weights_map.insert(agg_o.clone(), agg_t_map); weights_map.insert(agg_o.clone(), agg_t_map);
} }
Ok(weights_map) Ok(weights_map)
} }
/// Checks that the given slices have the expected length and that:
/// - numeric weights are non-zero,
/// - signs are non-zero.
fn check_weights_signs_lengths( fn check_weights_signs_lengths(
weights_vec: Vec<f64>, weights_vec: &[WeightValue],
signs_vec: Vec<f64>, signs_vec: &[f64],
_agg_xcats_for_cid: bool, agg_xcats_for_cid: bool,
agg_targ_len: usize, agg_targ_len: usize,
) -> Result<(), Box<dyn std::error::Error>> { ) -> Result<(), Box<dyn std::error::Error>> {
// for vx, vname in ... // For diagnostics, decide what to call the dimension
let agg_targ = match _agg_xcats_for_cid { let agg_targ = if agg_xcats_for_cid { "xcats" } else { "cids" };
true => "xcats",
false => "cids", // 1) Check numeric weights for zeroes.
}; for (i, weight) in weights_vec.iter().enumerate() {
for (vx, vname) in vec![ if let WeightValue::F64(val) = weight {
(weights_vec.clone(), "weights"), if *val == 0.0 {
(signs_vec.clone(), "signs"), return Err(format!("The weight at index {} is 0.0", i).into());
] {
for (i, v) in vx.iter().enumerate() {
if *v == 0.0 {
return Err(format!("The {} at index {} is 0.0", vname, i).into());
} }
} }
if vx.len() != agg_targ_len { }
return Err(format!( // 2) Ensure the weights vector is the expected length.
"The length of {} ({}) does not match the length of {} ({})", if weights_vec.len() != agg_targ_len {
vname, return Err(format!(
vx.len(), "The length of weights ({}) does not match the length of {} ({})",
agg_targ, weights_vec.len(),
agg_targ_len agg_targ,
) agg_targ_len
.into()); )
.into());
}
// 3) Check signs for zero.
for (i, sign) in signs_vec.iter().enumerate() {
if *sign == 0.0 {
return Err(format!("The sign at index {} is 0.0", i).into());
} }
} }
// 4) Ensure the signs vector is the expected length.
if signs_vec.len() != agg_targ_len {
return Err(format!(
"The length of signs ({}) does not match the length of {} ({})",
signs_vec.len(),
agg_targ,
agg_targ_len
)
.into());
}
Ok(()) Ok(())
} }
fn rename_result_dfw_cols( fn rename_result_dfw_cols(
@ -393,6 +468,36 @@ fn agg_xcats_for_cid(cids: Vec<String>, xcats: Vec<String>) -> bool {
xcats.len() > 1 xcats.len() > 1
} }
/// Represents a weight value that can be either a string or a number (float or integer).
#[derive(Debug, Clone, PartialEq)]
pub enum WeightValue {
Str(String),
F64(f64),
}
impl From<String> for WeightValue {
fn from(s: String) -> Self {
WeightValue::Str(s)
}
}
impl<'a> From<&'a str> for WeightValue {
fn from(s: &'a str) -> Self {
WeightValue::Str(s.to_string())
}
}
impl From<f64> for WeightValue {
fn from(f: f64) -> Self {
WeightValue::F64(f)
}
}
impl From<i32> for WeightValue {
fn from(i: i32) -> Self {
WeightValue::F64(i as f64)
}
}
/// Weighted linear combinations of cross sections or categories /// Weighted linear combinations of cross sections or categories
/// # Arguments /// # Arguments
/// * `df` - QDF DataFrame /// * `df` - QDF DataFrame
@ -417,7 +522,7 @@ pub fn linear_composite(
cids: Vec<String>, cids: Vec<String>,
weights: Option<Vec<f64>>, weights: Option<Vec<f64>>,
signs: Option<Vec<f64>>, signs: Option<Vec<f64>>,
weight_xcats: Option<Vec<String>>, weight_xcat: Option<String>,
normalize_weights: bool, normalize_weights: bool,
start: Option<String>, start: Option<String>,
end: Option<String>, end: Option<String>,
@ -429,10 +534,28 @@ pub fn linear_composite(
) -> Result<DataFrame, Box<dyn std::error::Error>> { ) -> Result<DataFrame, Box<dyn std::error::Error>> {
// Check if the DataFrame is a Quantamental DataFrame // Check if the DataFrame is a Quantamental DataFrame
check_quantamental_dataframe(df)?; check_quantamental_dataframe(df)?;
if agg_xcats_for_cid(cids.clone(), xcats.clone()) {
if weight_xcat.is_some() {
return Err(
format!(
"Using xcats as weights is not supported when aggregating cids for a single xcat. {:?} {:?}",
cids, xcats
)
.into(),
);
}
}
let mut rxcats = xcats.clone();
if weight_xcat.is_some() {
rxcats.extend(vec![weight_xcat.clone().unwrap()]);
}
let rdf = reduce_dataframe( let rdf = reduce_dataframe(
df.clone(), df.clone(),
Some(cids.clone()), Some(cids.clone()),
Some(xcats.clone()), Some(rxcats.clone()),
Some(vec!["value".to_string()]), Some(vec!["value".to_string()]),
start.clone(), start.clone(),
end.clone(), end.clone(),
@ -443,10 +566,11 @@ pub fn linear_composite(
let new_xcat = new_xcat.unwrap_or_else(|| "COMPOSITE".to_string()); let new_xcat = new_xcat.unwrap_or_else(|| "COMPOSITE".to_string());
let new_cid = new_cid.unwrap_or_else(|| "GLB".to_string()); let new_cid = new_cid.unwrap_or_else(|| "GLB".to_string());
let dfw = pivot_dataframe_by_ticker(rdf.clone(), Some("value".to_string())).unwrap(); let dfw = pivot_dataframe_by_ticker(rdf, Some("value".to_string())).unwrap();
let mul_targets = get_mul_targets(cids.clone(), xcats.clone())?; let mul_targets = get_mul_targets(cids.clone(), xcats.clone())?;
let weights_map = form_weights_and_signs_map(cids.clone(), xcats.clone(), weights, signs)?; let weights_map =
form_weights_and_signs_map(cids.clone(), xcats.clone(), weights, weight_xcat, signs)?;
for (ticker, targets) in mul_targets.iter() { for (ticker, targets) in mul_targets.iter() {
println!("ticker: {}, targets: {:?}", ticker, targets); println!("ticker: {}, targets: {:?}", ticker, targets);

View File

@ -0,0 +1,27 @@
use crate::utils::dateutils::{get_bdates_series_default_opt, get_min_max_real_dates};
use crate::utils::qdf::core::*;
use chrono::{Duration, NaiveDate};
use polars::prelude::*;
use std::collections::HashMap;
use std::error::Error;
use super::pivots::pivot_dataframe_by_ticker;
/// The required columns for a Quantamental DataFrame.
const QDF_INDEX_COLUMNS: [&str; 3] = ["real_date", "cid", "xcat"];
pub fn create_blacklist_from_qdf(
df: &DataFrame,
metric: Option<String>,
start: Option<String>,
end: Option<String>,
) -> Result<HashMap<String, Vec<String>>, Box<dyn Error>> {
// Verify that the DataFrame follows the Quantamental structure.
check_quantamental_dataframe(df)?;
let mut blacklist: HashMap<String, Vec<String>> = HashMap::new();
// Use the provided metric or default to "value".
let metric = metric.unwrap_or_else(|| "value".into());
Ok(blacklist)
}

View File

@ -1,11 +1,12 @@
pub mod blacklist;
pub mod core; pub mod core;
pub mod update_df;
pub mod load; pub mod load;
pub mod reduce_df;
pub mod pivots; pub mod pivots;
pub mod reduce_df;
pub mod update_df;
// Re-export submodules for easier access // Re-export submodules for easier access
pub use core::*; pub use core::*;
pub use update_df::*;
pub use load::*; pub use load::*;
pub use reduce_df::*; pub use reduce_df::*;
pub use update_df::*;