Compare commits


2 Commits

17 changed files with 230 additions and 1468 deletions

.gitattributes (vendored): 1 line changed
View File

@@ -1 +0,0 @@
notebooks/** linguist-vendored

File diff suppressed because one or more lines are too long

View File

@@ -1,360 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"# ! uv pip install E:\\Work\\ruzt\\msyrs --upgrade"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Import Python packages\n"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import macrosynergy\n",
"import pandas as pd\n",
"import numpy as np\n",
"import polars as pl\n",
"import os\n",
"import time\n",
"\n",
"from macrosynergy.panel import view_timelines\n",
"from macrosynergy.management.types import QuantamentalDataFrame\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Import Python bindings - `msyrs`\n"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"import msyrs"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>bdates</th>\n",
" <th>0</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>2000-01-03</td>\n",
" <td>2000-01-03</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>2000-01-10</td>\n",
" <td>2000-01-10</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>2000-01-17</td>\n",
" <td>2000-01-17</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>2000-01-24</td>\n",
" <td>2000-01-24</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>2000-01-31</td>\n",
" <td>2000-01-31</td>\n",
" </tr>\n",
" <tr>\n",
" <th>...</th>\n",
" <td>...</td>\n",
" <td>...</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1056</th>\n",
" <td>2020-03-30</td>\n",
" <td>2020-03-30</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1057</th>\n",
" <td>2020-04-06</td>\n",
" <td>2020-04-06</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1058</th>\n",
" <td>2020-04-13</td>\n",
" <td>2020-04-13</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1059</th>\n",
" <td>2020-04-20</td>\n",
" <td>2020-04-20</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1060</th>\n",
" <td>2020-04-27</td>\n",
" <td>2020-04-27</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"<p>1061 rows × 2 columns</p>\n",
"</div>"
],
"text/plain": [
" bdates 0\n",
"0 2000-01-03 2000-01-03\n",
"1 2000-01-10 2000-01-10\n",
"2 2000-01-17 2000-01-17\n",
"3 2000-01-24 2000-01-24\n",
"4 2000-01-31 2000-01-31\n",
"... ... ...\n",
"1056 2020-03-30 2020-03-30\n",
"1057 2020-04-06 2020-04-06\n",
"1058 2020-04-13 2020-04-13\n",
"1059 2020-04-20 2020-04-20\n",
"1060 2020-04-27 2020-04-27\n",
"\n",
"[1061 rows x 2 columns]"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"x = msyrs.utils.get_bdates_series_default_opt(start_date='2000-01-01', end_date='2020-05-01', freq='W').to_pandas()\n",
"y = pd.Series(pd.bdate_range(start='2000-01-01', end='2020-05-01', freq='W-MON'))\n",
"\n",
"pd.concat([x, y], axis=1)\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Results for M\t & \tBMS\t are exactly the same\n",
"Results for Q\t & \tBQS\t are exactly the same\n",
"Results for W\t & \tW-MON\t are exactly the same\n",
"Results for WF\t & \tW-FRI\t are exactly the same\n"
]
}
],
"source": [
"for rs_freq, pd_freq in [('M', 'BMS'), ('Q', 'BQS'), ('W', 'W-MON'), ('WF', 'W-FRI')]:\n",
"\n",
"\n",
" x = msyrs.utils.get_bdates_series_default_opt(start_date='2000-01-01', end_date='2020-05-01', freq=rs_freq).to_pandas()\n",
" y = pd.Series(pd.bdate_range(start='2000-01-01', end='2020-05-01', freq=pd_freq))\n",
"\n",
" e = x == y\n",
" res = e.all()\n",
" non_matching_df = pd.concat([x[~e], y[~e]], axis=1)\n",
" assert res, f\"Results for {rs_freq}\\t and \\t{pd_freq}\\t are not the same\\n{non_matching_df}\"\n",
" print(f\"Results for {rs_freq}\\t & \\t{pd_freq}\\t are exactly the same\")\n"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"23.5 μs ± 1.02 μs per loop (mean ± std. dev. of 7 runs, 10,000 loops each)\n",
"67.4 μs ± 979 ns per loop (mean ± std. dev. of 7 runs, 10,000 loops each)\n",
"1.97 ms ± 57.3 μs per loop (mean ± std. dev. of 7 runs, 1,000 loops each)\n",
"4.65 ms ± 170 μs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n",
"28.3 ms ± 898 μs per loop (mean ± std. dev. of 7 runs, 10 loops each)\n",
"93.8 ms ± 2.02 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)\n"
]
}
],
"source": [
"%timeit msyrs.utils.get_bdates_series_default_opt(start_date='2000-01-01', end_date='2020-05-01', freq='D')\n",
"%timeit msyrs.utils.get_bdates_series_default_opt(start_date='1971-01-01', end_date='2040-05-01', freq='D')\n",
"%timeit msyrs.utils.get_bdates_series_default_pl(start_date='2000-01-01', end_date='2020-05-01', freq='D')\n",
"%timeit msyrs.utils.get_bdates_series_default_pl(start_date='1971-01-01', end_date='2040-05-01', freq='D')\n",
"%timeit pd.bdate_range(start='2000-01-01', end='2020-05-01', freq='B')\n",
"%timeit pd.bdate_range(start='1971-01-01', end='2040-05-01', freq='B')"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"7.95 μs ± 146 ns per loop (mean ± std. dev. of 7 runs, 100,000 loops each)\n",
"17.9 μs ± 108 ns per loop (mean ± std. dev. of 7 runs, 100,000 loops each)\n",
"1.73 ms ± 20.8 μs per loop (mean ± std. dev. of 7 runs, 1,000 loops each)\n",
"4 ms ± 69.3 μs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n",
"5.69 ms ± 139 μs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n",
"19.1 ms ± 268 μs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n"
]
}
],
"source": [
"%timeit msyrs.utils.get_bdates_series_default_opt(start_date='2000-01-01', end_date='2020-05-01', freq='WF')\n",
"%timeit msyrs.utils.get_bdates_series_default_opt(start_date='1971-01-01', end_date='2040-05-01', freq='WF')\n",
"%timeit msyrs.utils.get_bdates_series_default_pl(start_date='2000-01-01', end_date='2020-05-01', freq='WF')\n",
"%timeit msyrs.utils.get_bdates_series_default_pl(start_date='1971-01-01', end_date='2040-05-01', freq='WF')\n",
"%timeit pd.bdate_range(start='2000-01-01', end='2020-05-01', freq='W-FRI')\n",
"%timeit pd.bdate_range(start='1971-01-01', end='2040-05-01', freq='W-FRI')"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"6.9 μs ± 126 ns per loop (mean ± std. dev. of 7 runs, 100,000 loops each)\n",
"13.1 μs ± 93.3 ns per loop (mean ± std. dev. of 7 runs, 100,000 loops each)\n",
"1.73 ms ± 29.3 μs per loop (mean ± std. dev. of 7 runs, 1,000 loops each)\n",
"4.2 ms ± 81.5 μs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n",
"931 μs ± 14.2 μs per loop (mean ± std. dev. of 7 runs, 1,000 loops each)\n",
"3.05 ms ± 47.5 μs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n"
]
}
],
"source": [
"%timeit msyrs.utils.get_bdates_series_default_opt(start_date='2000-01-01', end_date='2020-05-01', freq='ME')\n",
"%timeit msyrs.utils.get_bdates_series_default_opt(start_date='1971-01-01', end_date='2040-05-01', freq='ME')\n",
"%timeit msyrs.utils.get_bdates_series_default_pl(start_date='2000-01-01', end_date='2020-05-01', freq='ME')\n",
"%timeit msyrs.utils.get_bdates_series_default_pl(start_date='1971-01-01', end_date='2040-05-01', freq='ME')\n",
"%timeit pd.bdate_range(start='2000-01-01', end='2020-05-01', freq='BME')\n",
"%timeit pd.bdate_range(start='1971-01-01', end='2040-05-01', freq='BME')"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"3.65 μs ± 69.1 ns per loop (mean ± std. dev. of 7 runs, 100,000 loops each)\n",
"4.78 μs ± 38.7 ns per loop (mean ± std. dev. of 7 runs, 100,000 loops each)\n",
"1.73 ms ± 122 μs per loop (mean ± std. dev. of 7 runs, 1,000 loops each)\n",
"4.16 ms ± 286 μs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n",
"340 μs ± 11.3 μs per loop (mean ± std. dev. of 7 runs, 1,000 loops each)\n",
"1.1 ms ± 11.5 μs per loop (mean ± std. dev. of 7 runs, 1,000 loops each)\n"
]
}
],
"source": [
"%timeit msyrs.utils.get_bdates_series_default_opt(start_date='2000-01-01', end_date='2020-05-01', freq='Q')\n",
"%timeit msyrs.utils.get_bdates_series_default_opt(start_date='1971-01-01', end_date='2040-05-01', freq='Q')\n",
"%timeit msyrs.utils.get_bdates_series_default_pl(start_date='2000-01-01', end_date='2020-05-01', freq='Q')\n",
"%timeit msyrs.utils.get_bdates_series_default_pl(start_date='1971-01-01', end_date='2040-05-01', freq='Q')\n",
"%timeit pd.bdate_range(start='2000-01-01', end='2020-05-01', freq='BQS')\n",
"%timeit pd.bdate_range(start='1971-01-01', end='2040-05-01', freq='BQS')"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"3.21 μs ± 83.4 ns per loop (mean ± std. dev. of 7 runs, 100,000 loops each)\n",
"3.66 μs ± 198 ns per loop (mean ± std. dev. of 7 runs, 100,000 loops each)\n",
"2.67 ms ± 459 μs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n",
"3.71 ms ± 143 μs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n",
"98.7 μs ± 1.47 μs per loop (mean ± std. dev. of 7 runs, 10,000 loops each)\n",
"289 μs ± 15.3 μs per loop (mean ± std. dev. of 7 runs, 1,000 loops each)\n"
]
}
],
"source": [
"%timeit msyrs.utils.get_bdates_series_default_opt(start_date='2000-01-01', end_date='2020-05-01', freq='YE')\n",
"%timeit msyrs.utils.get_bdates_series_default_opt(start_date='1971-01-01', end_date='2040-05-01', freq='YE')\n",
"%timeit msyrs.utils.get_bdates_series_default_pl(start_date='2000-01-01', end_date='2020-05-01', freq='YE')\n",
"%timeit msyrs.utils.get_bdates_series_default_pl(start_date='1971-01-01', end_date='2040-05-01', freq='YE')\n",
"%timeit pd.bdate_range(start='2000-01-01', end='2020-05-01', freq='BYE')\n",
"%timeit pd.bdate_range(start='1971-01-01', end='2040-05-01', freq='BYE')"
]
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.4"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

File diff suppressed because one or more lines are too long

View File

@@ -1,33 +0,0 @@
#!/bin/bash
# Exit immediately if a command exits with a non-zero status
set -e
# Run "maturin --help". If it fails, print an error message and exit.
if ! maturin --help > /dev/null 2>&1; then
echo "Failed to run maturin --help" >&2
exit 1
fi
# Delete any existing build directory and create a new one.
rm -rf ./build
mkdir -p ./build
# Copy ./src/msyrs.pyi to ./msyrs.pyi.
cp ./src/msyrs.pyi ./msyrs.pyi
# Build using maturin.
maturin build --release --sdist --out ./build/
# Get the first wheel file found in the build directory.
whl_file=$(ls ./build/*.whl 2>/dev/null | head -n 1)
if [ -z "$whl_file" ]; then
echo "No wheel file found in ./build" >&2
exit 1
fi
# Rename the wheel file from .whl to .zip.
base_name="${whl_file%.whl}"
mv "$whl_file" "${base_name}.zip"
# Delete the temporary .pyi file.
rm ./msyrs.pyi

View File

@@ -1,20 +0,0 @@
#!/bin/bash
set -e
# Ensure maturin is installed. For example, you can install it via:
# pip install maturin
# Run "maturin --help". If it fails, print an error message and exit.
if ! maturin --help > /dev/null 2>&1; then
echo "Failed to run maturin --help" >&2
exit 1
fi
# Copy ./src/msyrs.pyi to the current directory as msyrs.pyi
cp ./src/msyrs.pyi ./msyrs.pyi
# Run maturin develop in release mode.
maturin develop --release
# Delete the temporary msyrs.pyi file.
rm ./msyrs.pyi
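
As a quick smoke test after running this script, one might import the freshly built extension and request a small business-date series. This is a hedged sketch, assuming the build succeeded in the active environment and using the consolidated `get_bdates_series_default` binding registered in the updated `utils` module further down.

import msyrs

# Daily business dates over one quarter; freq codes follow the notebook above ('D', 'W', 'WF', 'M', 'Q', ...).
bdates = msyrs.utils.get_bdates_series_default(start_date="2023-01-01", end_date="2023-03-31", freq="D")
print(len(bdates))  # a polars Series of business dates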

View File

@@ -68,7 +68,7 @@ pub fn get_period_indices_hv(dfw: PyDataFrame, est_freq: &str) -> PyResult<Vec<u
cids,
weights = None,
signs = None,
weight_xcat = None,
weight_xcats = None,
normalize_weights = false,
start = None,
end = None,
@@ -84,7 +84,7 @@ pub fn linear_composite(
cids: Vec<String>,
weights: Option<Vec<f64>>,
signs: Option<Vec<f64>>,
weight_xcat: Option<String>,
weight_xcats: Option<Vec<String>>,
normalize_weights: bool,
start: Option<String>,
end: Option<String>,
@@ -101,7 +101,7 @@ pub fn linear_composite(
cids,
weights,
signs,
weight_xcat,
weight_xcats,
normalize_weights,
start,
end,

View File

@@ -1,62 +1,22 @@
use pyo3::{prelude::*, types::PyDict};
use pyo3::prelude::*;
use pyo3_polars::{PyDataFrame, PySeries};
/// Python wrapper for [`crate::utils::qdf`] module.
#[allow(deprecated)]
#[pymodule]
pub fn utils(_py: Python, m: &PyModule) -> PyResult<()> {
m.add_function(wrap_pyfunction!(get_bdates_series_default_pl, m)?)?;
m.add_function(wrap_pyfunction!(get_bdates_series_default_opt, m)?)?;
m.add_function(wrap_pyfunction!(create_blacklist_from_qdf, m)?)?;
m.add_function(wrap_pyfunction!(get_bdates_series_default, m)?)?;
Ok(())
}
#[pyfunction]
pub fn get_bdates_series_default_pl(
pub fn get_bdates_series_default(
start_date: String,
end_date: String,
freq: Option<String>,
) -> PyResult<PySeries> {
Ok(PySeries(
crate::utils::dateutils::get_bdates_series_default_pl(start_date, end_date, freq)
crate::utils::dateutils::get_bdates_series_default(start_date, end_date, freq)
.map_err(|e| PyErr::new::<pyo3::exceptions::PyValueError, _>(format!("{}", e)))?,
))
}
#[pyfunction]
pub fn get_bdates_series_default_opt(
start_date: String,
end_date: String,
freq: Option<String>,
) -> PyResult<PySeries> {
Ok(PySeries(
crate::utils::dateutils::get_bdates_series_default_opt(start_date, end_date, freq)
.map_err(|e| PyErr::new::<pyo3::exceptions::PyValueError, _>(format!("{}", e)))?,
))
}
#[allow(deprecated)]
#[pyfunction(signature = (df, group_by_cid=None, blacklist_name=None, metrics=None))]
pub fn create_blacklist_from_qdf(
df: PyDataFrame,
group_by_cid: Option<bool>,
blacklist_name: Option<String>,
metrics: Option<Vec<String>>,
) -> PyResult<PyObject> {
let result = crate::utils::qdf::blacklist::create_blacklist_from_qdf(
&df.into(),
group_by_cid,
blacklist_name,
metrics,
)
.map_err(|e| PyErr::new::<pyo3::exceptions::PyValueError, _>(format!("{}", e)))?;
Python::with_gil(|py| {
let dict = PyDict::new(py);
// for (key, (start_date, end_date)) in result {
// dict.set_item(key, (start_date, end_date))
for (key, dates) in result {
dict.set_item(key, dates).map_err(|e| PyErr::from(e))?;
}
Ok(dict.into())
})
}

View File

@@ -58,7 +58,7 @@ fn all_jpmaq_expressions(expressions: Vec<String>) -> bool {
///
/// Example Usage:
///
/// ```ignore
/// ```rust
/// use msyrs::download::jpmaqsdownload::JPMaQSDownloadGetIndicatorArgs;
/// use msyrs::download::jpmaqsdownload::JPMaQSDownload;
///
@@ -102,7 +102,7 @@ impl Default for JPMaQSDownloadGetIndicatorArgs {
/// Struct for downloading JPMaQS data from the JPMorgan DataQuery API.
///
/// ## Example Usage
/// ```ignore
/// ```rust
/// use msyrs::download::jpmaqsdownload::JPMaQSDownload;
/// use msyrs::download::jpmaqsdownload::JPMaQSDownloadGetIndicatorArgs;
/// use polars::prelude::*;
@@ -277,7 +277,7 @@ impl JPMaQSDownload {
///
/// Usage:
///
/// ```ignore
/// ```rust
/// use msyrs::download::jpmaqsdownload::JPMaQSDownload;
/// use msyrs::download::jpmaqsdownload::JPMaQSDownloadGetIndicatorArgs;
/// let mut jpmaqs_download = JPMaQSDownload::default();

View File

@@ -51,10 +51,6 @@ class panel:
def linear_composite(*args, **kwargs) -> DataFrame: ...
class utils:
__all__ = ["get_bdates_series_default", "get_bdates_series_default_opt"]
__all__ = ["get_bdates_series_default"]
@staticmethod
def get_bdates_series_default_pl(*args, **kwargs) -> Series: ...
@staticmethod
def get_bdates_series_default_opt(*args, **kwargs) -> Series: ...
@staticmethod
def create_blacklist_from_qdf(*args, **kwargs) -> dict: ...
def get_bdates_series_default(*args, **kwargs) -> Series: ...
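
With the stub pared down to a single entry point, a hedged usage sketch (mirroring the notebook's pandas cross-check, with the removed `_opt` name swapped for the consolidated binding) might look like:

import pandas as pd
import msyrs

# Weekly (Monday) business dates from msyrs vs. pandas; the two series should agree element-wise.
x = msyrs.utils.get_bdates_series_default(start_date="2000-01-01", end_date="2020-05-01", freq="W").to_pandas()
y = pd.Series(pd.bdate_range(start="2000-01-01", end="2020-05-01", freq="W-MON"))
assert (x == y).all()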

View File

@@ -1,6 +1,6 @@
use crate::utils::dateutils::{get_bdates_from_col, get_min_max_real_dates};
use crate::utils::qdf::pivots::*;
use crate::utils::qdf::reduce_dataframe;
use crate::utils::qdf::reduce_df::*;
use chrono::NaiveDate;
use ndarray::{s, Array, Array1, Zip};
use polars::prelude::*;

View File

@@ -1,6 +1,6 @@
use crate::utils::qdf::check_quantamental_dataframe;
use crate::utils::qdf::pivots::{pivot_dataframe_by_ticker, pivot_wide_dataframe_to_qdf};
use crate::utils::qdf::reduce_df::reduce_dataframe;
use crate::utils::qdf::pivots::*;
use crate::utils::qdf::reduce_df::*;
use polars::prelude::*;
use std::collections::HashMap;
const TOLERANCE: f64 = 1e-8;
@@ -108,42 +108,14 @@ fn _form_agg_nan_mask_series(nan_mask_dfw: &DataFrame) -> Result<Series, PolarsE
Ok(combined.into_series())
}
/// Form the weights DataFrame
fn _form_agg_weights_dfw(
agg_weights_map: &HashMap<String, (WeightValue, f64)>,
dfw: &DataFrame,
agg_weights_map: &HashMap<String, Vec<f64>>,
data_dfw: DataFrame,
) -> Result<DataFrame, PolarsError> {
let mut weights_dfw = DataFrame::new(vec![])?;
for (agg_targ, weight_signs) in agg_weights_map.iter() {
// let wgt = weight_signs[0] * weight_signs[1];
let wgt_series = match &weight_signs.0 {
WeightValue::F64(val) => {
let wgt = val * weight_signs.1;
Series::new(agg_targ.into(), vec![wgt; dfw.height()])
}
WeightValue::Str(vstr) => {
// vstr column from data_dfw, else raise weight specification error
if !dfw.get_column_names().contains(&&PlSmallStr::from(vstr)) {
return Err(PolarsError::ComputeError(
format!(
"The column {} does not exist in the DataFrame. {:?}",
vstr, agg_weights_map
)
.into(),
));
}
let vstr_series = dfw.column(vstr)?;
let multiplied_series = vstr_series * weight_signs.1;
let mut multiplied_series =
multiplied_series.as_series().cloned().ok_or_else(|| {
PolarsError::ComputeError(
"Failed to convert multiplied_series to Series".into(),
)
})?;
multiplied_series.rename(agg_targ.into());
multiplied_series
}
};
let wgt = weight_signs[0] * weight_signs[1];
let wgt_series = Series::new(agg_targ.into(), vec![wgt; data_dfw.height()]);
weights_dfw.with_column(wgt_series)?;
}
Ok(weights_dfw)
@@ -171,14 +143,14 @@ fn perform_single_group_agg(
dfw: &DataFrame,
agg_on: &String,
agg_targs: &Vec<String>,
agg_weights_map: &HashMap<String, (WeightValue, f64)>,
agg_weights_map: &HashMap<String, Vec<f64>>,
normalize_weights: bool,
complete: bool,
) -> Result<Column, PolarsError> {
let data_dfw = _form_agg_data_dfw(dfw, agg_targs)?;
let nan_mask_dfw = _form_agg_nan_mask_dfw(&data_dfw)?;
let nan_mask_series = _form_agg_nan_mask_series(&nan_mask_dfw)?;
let weights_dfw = _form_agg_weights_dfw(agg_weights_map, dfw)?;
let weights_dfw = _form_agg_weights_dfw(agg_weights_map, data_dfw.clone())?;
let weights_dfw = match normalize_weights {
true => normalize_weights_with_nan_mask(weights_dfw, nan_mask_dfw)?,
false => weights_dfw,
@@ -220,7 +192,7 @@ fn perform_single_group_agg(
fn perform_multiplication(
dfw: &DataFrame,
mult_targets: &HashMap<String, Vec<String>>,
weights_map: &HashMap<String, HashMap<String, (WeightValue, f64)>>,
weights_map: &HashMap<String, HashMap<String, Vec<f64>>>,
complete: bool,
normalize_weights: bool,
) -> Result<DataFrame, PolarsError> {
@@ -228,7 +200,6 @@ fn perform_multiplication(
// let mut new_dfw = DataFrame::new(vec![real_date])?;
let mut new_dfw = DataFrame::new(vec![])?;
assert!(!mult_targets.is_empty(), "agg_targs is empty");
for (agg_on, agg_targs) in mult_targets.iter() {
// perform_single_group_agg
let cols_len = new_dfw.get_column_names().len();
@@ -317,122 +288,76 @@ fn get_mul_targets(
Ok(mul_targets)
}
/// Builds a map of the shape:
/// `HashMap<String, HashMap<String, (WeightValue, f64)>>`
/// where only one of `weights` or `weight_xcats` can be provided.
/// If neither is provided, weights default to 1.0.
/// Each tuple is `(WeightValue, f64) = (weight, sign)`.
fn form_weights_and_signs_map(
cids: Vec<String>,
xcats: Vec<String>,
weights: Option<Vec<f64>>,
weight_xcat: Option<String>,
signs: Option<Vec<f64>>,
) -> Result<HashMap<String, HashMap<String, (WeightValue, f64)>>, Box<dyn std::error::Error>> {
// For demonstration, we pretend to load or infer these from helpers:
let agg_xcats_for_cid = agg_xcats_for_cid(cids.clone(), xcats.clone());
) -> Result<HashMap<String, HashMap<String, Vec<f64>>>, Box<dyn std::error::Error>> {
let _agg_xcats_for_cid = agg_xcats_for_cid(cids.clone(), xcats.clone());
let (agg_on, agg_targ) = get_agg_on_agg_targs(cids.clone(), xcats.clone());
// Determine if each weight option has non-empty values.
let weights_provided = weights.as_ref().map_or(false, |v| !v.is_empty());
let weight_xcats_provided = weight_xcat.as_ref().map_or(false, |v| !v.is_empty());
// if weights are None, create a vector of 1s of the same length as agg_targ
let weights = weights.unwrap_or(vec![1.0 / agg_targ.len() as f64; agg_targ.len()]);
let signs = signs.unwrap_or(vec![1.0; agg_targ.len()]);
// Enforce that only one of weights or weight_xcats is specified.
if weights_provided && weight_xcats_provided {
return Err("Only one of `weights` and `weight_xcats` may be specified.".into());
}
// check that the lengths of weights and signs match the length of agg_targ
check_weights_signs_lengths(
weights.clone(),
signs.clone(),
_agg_xcats_for_cid,
agg_targ.len(),
)?;
// 1) Build the "actual_weights" vector as WeightValue.
let actual_weights: Vec<WeightValue> = if weights_provided {
weights.unwrap().into_iter().map(WeightValue::F64).collect()
} else if weight_xcats_provided {
vec![WeightValue::Str(weight_xcat.unwrap()); agg_targ.len()]
} else {
// Default to numeric 1.0 if neither is provided
vec![WeightValue::F64(1.0); agg_targ.len()]
};
// 2) Build the "signs" vector; default to 1.0 if not provided
let signs = signs.unwrap_or_else(|| vec![1.0; agg_targ.len()]);
// 3) Optional: check lengths & zero values (only numeric weights).
check_weights_signs_lengths(&actual_weights, &signs, agg_xcats_for_cid, agg_targ.len())?;
// 4) Build the final nested HashMap
let mut weights_map: HashMap<String, HashMap<String, (WeightValue, f64)>> = HashMap::new();
let mut weights_map = HashMap::new();
for agg_o in agg_on {
let mut agg_t_map = HashMap::new();
for (i, agg_t) in agg_targ.iter().enumerate() {
// Format the ticker
let ticker = if agg_xcats_for_cid {
format!("{}_{}", agg_o, agg_t)
} else {
format!("{}_{}", agg_t, agg_o)
let ticker = match _agg_xcats_for_cid {
true => format!("{}_{}", agg_o, agg_t),
false => format!("{}_{}", agg_t, agg_o),
};
// Build the tuple (WeightValue, f64)
let weight_sign_tuple = match &actual_weights[i] {
WeightValue::F64(val) => (WeightValue::F64(*val).clone(), signs[i]),
WeightValue::Str(vstr) => {
let new_str = format!("{}_{}", agg_t, vstr);
(WeightValue::Str(new_str), signs[i])
}
};
agg_t_map.insert(ticker, weight_sign_tuple);
let weight_signs = vec![weights[i], signs[i]];
agg_t_map.insert(ticker, weight_signs);
}
weights_map.insert(agg_o.clone(), agg_t_map);
}
Ok(weights_map)
}
/// Checks that the given slices have the expected length and that:
/// - numeric weights are non-zero,
/// - signs are non-zero.
fn check_weights_signs_lengths(
weights_vec: &[WeightValue],
signs_vec: &[f64],
agg_xcats_for_cid: bool,
weights_vec: Vec<f64>,
signs_vec: Vec<f64>,
_agg_xcats_for_cid: bool,
agg_targ_len: usize,
) -> Result<(), Box<dyn std::error::Error>> {
// For diagnostics, decide what to call the dimension
let agg_targ = if agg_xcats_for_cid { "xcats" } else { "cids" };
// 1) Check numeric weights for zeroes.
for (i, weight) in weights_vec.iter().enumerate() {
if let WeightValue::F64(val) = weight {
if *val == 0.0 {
return Err(format!("The weight at index {} is 0.0", i).into());
// for vx, vname in ...
let agg_targ = match _agg_xcats_for_cid {
true => "xcats",
false => "cids",
};
for (vx, vname) in vec![
(weights_vec.clone(), "weights"),
(signs_vec.clone(), "signs"),
] {
for (i, v) in vx.iter().enumerate() {
if *v == 0.0 {
return Err(format!("The {} at index {} is 0.0", vname, i).into());
}
}
}
// 2) Ensure the weights vector is the expected length.
if weights_vec.len() != agg_targ_len {
return Err(format!(
"The length of weights ({}) does not match the length of {} ({})",
weights_vec.len(),
agg_targ,
agg_targ_len
)
.into());
}
// 3) Check signs for zero.
for (i, sign) in signs_vec.iter().enumerate() {
if *sign == 0.0 {
return Err(format!("The sign at index {} is 0.0", i).into());
if vx.len() != agg_targ_len {
return Err(format!(
"The length of {} ({}) does not match the length of {} ({})",
vname,
vx.len(),
agg_targ,
agg_targ_len
)
.into());
}
}
// 4) Ensure the signs vector is the expected length.
if signs_vec.len() != agg_targ_len {
return Err(format!(
"The length of signs ({}) does not match the length of {} ({})",
signs_vec.len(),
agg_targ,
agg_targ_len
)
.into());
}
Ok(())
}
fn rename_result_dfw_cols(
@@ -468,36 +393,6 @@ fn agg_xcats_for_cid(cids: Vec<String>, xcats: Vec<String>) -> bool {
xcats.len() > 1
}
/// Represents a weight value that can be a string, (float, or integer).
#[derive(Debug, Clone, PartialEq)]
pub enum WeightValue {
Str(String),
F64(f64),
}
impl From<String> for WeightValue {
fn from(s: String) -> Self {
WeightValue::Str(s)
}
}
impl<'a> From<&'a str> for WeightValue {
fn from(s: &'a str) -> Self {
WeightValue::Str(s.to_string())
}
}
impl From<f64> for WeightValue {
fn from(f: f64) -> Self {
WeightValue::F64(f)
}
}
impl From<i32> for WeightValue {
fn from(i: i32) -> Self {
WeightValue::F64(i as f64)
}
}
/// Weighted linear combinations of cross sections or categories
/// # Arguments
/// * `df` - QDF DataFrame
@@ -522,7 +417,7 @@ pub fn linear_composite(
cids: Vec<String>,
weights: Option<Vec<f64>>,
signs: Option<Vec<f64>>,
weight_xcat: Option<String>,
weight_xcats: Option<Vec<String>>,
normalize_weights: bool,
start: Option<String>,
end: Option<String>,
@@ -534,28 +429,10 @@ pub fn linear_composite(
) -> Result<DataFrame, Box<dyn std::error::Error>> {
// Check if the DataFrame is a Quantamental DataFrame
check_quantamental_dataframe(df)?;
if agg_xcats_for_cid(cids.clone(), xcats.clone()) {
if weight_xcat.is_some() {
return Err(
format!(
"Using xcats as weights is not supported when aggregating cids for a single xcat. {:?} {:?}",
cids, xcats
)
.into(),
);
}
}
let mut rxcats = xcats.clone();
if weight_xcat.is_some() {
rxcats.extend(vec![weight_xcat.clone().unwrap()]);
}
let rdf = reduce_dataframe(
df.clone(),
Some(cids.clone()),
Some(rxcats.clone()),
Some(xcats.clone()),
Some(vec!["value".to_string()]),
start.clone(),
end.clone(),
@@ -566,11 +443,10 @@ pub fn linear_composite(
let new_xcat = new_xcat.unwrap_or_else(|| "COMPOSITE".to_string());
let new_cid = new_cid.unwrap_or_else(|| "GLB".to_string());
let dfw = pivot_dataframe_by_ticker(rdf, Some("value".to_string())).unwrap();
let dfw = pivot_dataframe_by_ticker(rdf.clone(), Some("value".to_string())).unwrap();
let mul_targets = get_mul_targets(cids.clone(), xcats.clone())?;
let weights_map =
form_weights_and_signs_map(cids.clone(), xcats.clone(), weights, weight_xcat, signs)?;
let weights_map = form_weights_and_signs_map(cids.clone(), xcats.clone(), weights, signs)?;
for (ticker, targets) in mul_targets.iter() {
println!("ticker: {}, targets: {:?}", ticker, targets);

View File

@@ -1,4 +1,4 @@
use crate::utils::bdates;
use crate::utils::bdates::get_bdates_list_with_freq;
use crate::utils::bdates::BDateFreq;
use chrono::NaiveDate;
use chrono::{Datelike, Weekday};
@@ -36,110 +36,25 @@ pub fn get_min_max_real_dates(
}
}
/// Get the business dates between two dates.
pub fn get_bdates_list(
start_date: String,
end_date: String,
) -> Result<Vec<NaiveDate>, Box<dyn Error>> {
let start_date = NaiveDate::parse_from_str(&start_date, "%Y-%m-%d")?;
let end_date = NaiveDate::parse_from_str(&end_date, "%Y-%m-%d")?;
let mut business_days = Vec::new();
let mut current_date = start_date;
while current_date <= end_date {
// Check if the current date is a business day (not Saturday or Sunday)
if current_date.weekday() != Weekday::Sat && current_date.weekday() != Weekday::Sun {
business_days.push(current_date);
}
current_date = current_date.succ_opt().ok_or(format!(
"Failed to get the next day for : {:?}",
current_date
))?;
}
Ok(business_days)
}
fn compute_group_key(d: NaiveDate, freq: BDateFreq) -> String {
match freq {
// For Daily, each date is its own group.
BDateFreq::Daily => format!("{}", d),
// For weekly grouping, we use ISO week information.
BDateFreq::WeeklyMonday | BDateFreq::WeeklyFriday => {
let iso = d.iso_week();
format!("{}-W{:02}", iso.year(), iso.week())
}
// Group by Year-Month.
BDateFreq::MonthStart | BDateFreq::MonthEnd => {
format!("{}-M{:02}", d.year(), d.month())
}
// Group by Year-Quarter.
BDateFreq::QuarterStart | BDateFreq::QuarterEnd => {
let quarter = (d.month() - 1) / 3 + 1;
format!("{}-Q{}", d.year(), quarter)
}
// Group by Year.
BDateFreq::YearStart | BDateFreq::YearEnd => format!("{}", d.year()),
}
}
pub fn get_bdates_series_default_opt(
pub fn get_bdates_series_default(
start_date: String,
end_date: String,
freq: Option<String>,
) -> Result<Series, Box<dyn Error>> {
let freq = freq.unwrap_or_else(|| "D".to_string());
let freq = BDateFreq::from_str(&freq)?;
let series = Series::new(
"bdates".into(),
bdates::get_bdates_list_with_freq(&start_date, &end_date, freq)?,
);
Ok(series)
}
pub fn get_bdates_series_default_pl(
start_date: String,
end_date: String,
freq: Option<String>,
) -> Result<Series, Box<dyn Error>> {
let freq = freq.unwrap_or_else(|| "D".to_string());
let freq = BDateFreq::from_str(&freq)?;
get_bdates_series_pl(start_date, end_date, freq)
get_bdates_series(start_date, end_date, freq)
}
/// Get the business dates between two dates as a Series.
pub fn get_bdates_series_pl(
pub fn get_bdates_series(
start_date: String,
end_date: String,
freq: BDateFreq,
) -> Result<Series, Box<dyn Error>> {
let business_days = get_bdates_list(start_date, end_date)?;
let group_keys: Vec<String> = business_days
.iter()
.map(|&d| compute_group_key(d, freq))
.collect();
let df = DataFrame::new(vec![
Column::new("bdates".into(), business_days),
Column::new("group".into(), group_keys),
])?;
let gb = df.lazy().group_by(["group"]);
let aggx = match freq.agg_type() {
bdates::AggregationType::Start => gb.agg([col("bdates").first()]),
bdates::AggregationType::End => gb.agg([col("bdates").last()]),
};
let result = aggx.collect()?;
let result = result
.column("bdates")?
.as_series()
.ok_or("Column 'bdates' not found")?
.clone();
let result = result.sort(SortOptions {
descending: false,
nulls_last: false,
multithreaded: false,
maintain_order: false,
})?;
Ok(result)
let bdates_list = get_bdates_list_with_freq(&start_date, &end_date, freq)?;
let bdates_series = Series::new("bdates".into(), bdates_list);
Ok(bdates_series)
}
/// Get the business dates from a date column in a DataFrame.

View File

@@ -1,373 +0,0 @@
use crate::utils::bdates::{get_bdates_list_with_freq, BDateFreq};
use crate::utils::dateutils::get_min_max_real_dates;
use crate::utils::misc::get_cid;
use crate::utils::qdf::core::check_quantamental_dataframe;
use chrono::NaiveDate;
use polars::prelude::*;
use std::collections::{BTreeMap, HashMap};
use std::error::Error;
use crate::utils::qdf::get_unique_metrics;
// struct Blacklist which is a wrapper around hashmap and btreemap
#[derive(Debug, Clone)]
pub struct Blacklist {
pub blacklist: BTreeMap<String, (String, String)>,
}
// impl hashmap into
impl Blacklist {
pub fn into_hashmap(self) -> HashMap<String, (String, String)> {
self.blacklist.into_iter().collect()
}
}
/// Apply a blacklist to a Quantamental DataFrame.
///
/// * `blacklist` is a map from any “tickerlike” key to a tuple of
/// `(start_date, end_date)` in **inclusive** `"YYYYMMDD"` format.
/// * `metrics` if `None`, every metric from `get_unique_metrics(df)`
/// is used.
/// * `group_by_cid = Some(false)` is not implemented yet.
pub fn apply_blacklist(
df: &mut DataFrame,
blacklist: &BTreeMap<String, (String, String)>,
metrics: Option<Vec<String>>,
group_by_cid: Option<bool>,
) -> Result<DataFrame, Box<dyn std::error::Error>> {
check_quantamental_dataframe(df)?;
// dataframe is like:
// | cid | xcat | real_date | metric1 | metric2 |
// |-----|------|-----------|---------|---------|
// | A | B | 2023-01-01| 1.0 | 2.0 |
// | A | B | 2023-01-02| 1.0 | 2.0 |
// | A | C | 2023-01-01| 1.0 | 2.0 |
// | A | C | 2023-01-02| 1.0 | 2.0 |
// | D | E | 2023-01-01| 1.0 | 2.0 |
// | D | E | 2023-01-02| 1.0 | 2.0 |
// (real date column is Naive date)
// blacklist is like:
// {'A_B_1': ('2023-01-02', '2023-01-03'),
// 'A_B_2': ('2023-01-04', '2023-01-05'),
// 'A_C_1': ('2023-01-02', '2023-01-03'), }
// get_cid('A_B_1') = 'A'
// get_cid('A_B_2') = 'A'
// get_cid('D_E_1') = 'D'
Ok(df.clone())
}
/// Create a blacklist from a Quantamental DataFrame.
/// The blacklist is a mapping of tickers to date ranges where the specified metrics are null or NaN.
/// # Arguments:
/// * `df` - The Quantamental DataFrame.
/// * `group_by_cid` - If true, group the blacklist by `cid`. Defaults to true.
/// * `blacklist_name` - The name of the blacklist. Defaults to "BLACKLIST".
/// * `metrics` - The metrics to check for null or NaN values. If None, all metrics are used.
pub fn create_blacklist_from_qdf(
df: &DataFrame,
group_by_cid: Option<bool>,
blacklist_name: Option<String>,
metrics: Option<Vec<String>>,
) -> Result<BTreeMap<String, (String, String)>, Box<dyn Error>> {
check_quantamental_dataframe(df)?;
let metrics = metrics.unwrap_or_else(|| get_unique_metrics(df).unwrap());
let blacklist_name = blacklist_name.unwrap_or_else(|| "BLACKLIST".into());
let group_by_cid = group_by_cid.unwrap_or(true);
let (min_date, max_date) = get_min_max_real_dates(df, "real_date".into())?;
let min_date_str = min_date.format("%Y-%m-%d").to_string();
let max_date_str = max_date.format("%Y-%m-%d").to_string();
// let all_bdates = get_bdates_series_default_opt(min_date_str, max_date_str, None)?;
let all_bdates = get_bdates_list_with_freq(
min_date_str.clone().as_str(),
max_date_str.clone().as_str(),
BDateFreq::Daily,
)?;
// if none of the metrics are null or NaN, return an empty blacklist
if !metrics.iter().any(|metric| {
df.column(metric)
.map(|col| col.is_null().any())
.unwrap_or(false)
}) {
return Ok(BTreeMap::new());
}
// let null_mask = get_nan_mask(df, metrics)?;
// let df = df.filter(&null_mask)?.clone();
let df = df
.clone()
.lazy()
.with_columns([
(cols(metrics.clone()).is_null().or(cols(metrics).is_nan())).alias("null_mask")
])
.filter(col("null_mask"))
// if the frame is now empty, return an empty blacklist
.sort(
["cid", "xcat"],
SortMultipleOptions::default().with_maintain_order(true),
)
.group_by([col("cid"), col("xcat")])
// .agg([col("real_date").sort(SortOptions::default())])
.agg([col("real_date")
.dt()
.strftime("%Y-%m-%d")
.sort(SortOptions::default())])
.select([
concat_str([col("cid"), col("xcat")], "_", true).alias("ticker"),
col("real_date").alias("real_dates"),
])
.collect()?;
// assert!(0 == 1, "{:?}", df);
let ticker_vec = df
.column("ticker")?
.str()?
.into_iter()
.filter_map(|opt| opt.map(|s| s.to_string()))
.collect::<Vec<String>>();
let rdt = get_vec_of_vec_of_dates_from_df(df)?;
let mut blk: HashMap<String, Vec<String>> = HashMap::new();
for (tkr, dates) in ticker_vec.iter().zip(rdt.iter()) {
if group_by_cid {
let _cid = get_cid(tkr.clone())?;
if blk.contains_key(&_cid) {
blk.get_mut(&_cid).unwrap().extend(dates.iter().cloned());
} else {
blk.insert(_cid, dates.clone());
}
} else {
blk.insert(tkr.to_string(), dates.clone());
}
}
for (_key, vals) in blk.iter_mut() {
// order is important - dedup depends on the vec being sorted
vals.sort();
vals.dedup();
}
let all_bdates_strs = all_bdates
.iter()
.map(|date| date.format("%Y-%m-%d").to_string())
.collect::<Vec<String>>();
let mut blacklist: HashMap<String, (String, String)> = HashMap::new();
for (tkr, dates) in blk.iter() {
let date_ranges = convert_dates_list_to_date_ranges(dates.clone(), all_bdates_strs.clone());
for (rng_idx, (start_date, end_date)) in date_ranges.iter() {
let range_key = format!("{}_{}_{}", tkr, blacklist_name.clone(), rng_idx);
blacklist.insert(range_key, (start_date.clone(), end_date.clone()));
}
}
// Ok(blacklist)
let mut btree_map: BTreeMap<String, (String, String)> = BTreeMap::new();
for (key, (start_date, end_date)) in blacklist.iter() {
btree_map.insert(key.clone(), (start_date.clone(), end_date.clone()));
}
Ok(btree_map)
}
/// Get a mask of NaN values for the specified metrics in the DataFrame.
#[allow(dead_code)]
fn get_nan_mask(
df: &DataFrame,
metrics: Vec<String>,
) -> Result<ChunkedArray<BooleanType>, Box<dyn Error>> {
let null_masks: Vec<ChunkedArray<BooleanType>> = metrics
.iter()
.map(|metric| {
let null_mask = df.column(metric.as_str())?.is_null();
let nan_mask = df.column(metric.as_str())?.is_nan()?;
Ok(null_mask | nan_mask)
})
.collect::<Result<_, Box<dyn Error>>>()?;
let null_mask = null_masks
.into_iter()
.reduce(|acc, mask| acc | mask)
.unwrap_or_else(|| BooleanChunked::full_null("null_mask".into(), df.height()));
Ok(null_mask)
}
fn convert_dates_list_to_date_ranges(
blacklist: Vec<String>,
all_bdates_strs: Vec<String>,
) -> HashMap<String, (String, String)> {
// Step 1: Map every date in all_bdates_strs to its index
let mut all_map: HashMap<String, usize> = HashMap::new();
for (i, d) in all_bdates_strs.iter().enumerate() {
all_map.insert(d.clone(), i);
}
// Step 2: Convert each blacklisted date into its index, if it exists
let mut blacklisted_indices: Vec<usize> = Vec::new();
for dt in blacklist {
if let Some(&idx) = all_map.get(&dt) {
blacklisted_indices.push(idx);
}
}
// Step 3: Sort the blacklisted indices
blacklisted_indices.sort_unstable();
// Step 4: Traverse and group consecutive indices into ranges
let mut result: HashMap<i64, (String, String)> = HashMap::new();
let mut string_result: HashMap<String, (String, String)> = HashMap::new();
if blacklisted_indices.is_empty() {
return string_result;
}
let mut range_idx: i64 = 0;
let mut start_idx = blacklisted_indices[0];
let mut end_idx = start_idx;
for &cur_idx in blacklisted_indices.iter().skip(1) {
if cur_idx == end_idx + 1 {
// We are still in a contiguous run
end_idx = cur_idx;
} else {
// We hit a break in contiguity, so store the last range
result.insert(
range_idx,
(
all_bdates_strs[start_idx].clone(),
all_bdates_strs[end_idx].clone(),
),
);
range_idx += 1;
// Start a new range
start_idx = cur_idx;
end_idx = cur_idx;
}
}
// Don't forget to store the final range after the loop
result.insert(
range_idx,
(
all_bdates_strs[start_idx].clone(),
all_bdates_strs[end_idx].clone(),
),
);
let max_digits = result.keys().max().unwrap_or(&-1).to_string().len();
for (key, (start_date, end_date)) in result.iter() {
let new_key = format!("{:0width$}", key, width = max_digits);
string_result.insert(new_key, (start_date.clone(), end_date.clone()));
}
string_result
}
fn get_vec_of_vec_of_dates_from_df(df: DataFrame) -> Result<Vec<Vec<String>>, Box<dyn Error>> {
let rdt = df
.column("real_dates")?
// .clone()
.as_series()
.unwrap()
.list()?
.into_iter()
.filter_map(|opt| opt)
.collect::<Vec<Series>>()
.iter()
.map(|s| {
s.str()
.unwrap()
.into_iter()
.filter_map(|opt| opt.map(|s| s.to_string()))
.collect::<Vec<String>>()
})
.collect::<Vec<Vec<String>>>();
Ok(rdt)
}
#[allow(dead_code)]
fn get_vec_of_vec_of_naivedates_from_df(
df: DataFrame,
) -> Result<Vec<Vec<NaiveDate>>, Box<dyn Error>> {
let rdt = df
.column("real_dates")?
// .clone()
.as_series()
.unwrap()
.list()?
.into_iter()
.filter_map(|opt| opt)
.collect::<Vec<Series>>()
.iter()
.map(|s| {
s.date()
.unwrap()
.into_iter()
.filter_map(|opt| opt.and_then(|date| NaiveDate::from_num_days_from_ce_opt(date)))
.collect::<Vec<NaiveDate>>()
})
.collect::<Vec<Vec<NaiveDate>>>();
Ok(rdt)
}
// fn get_vec_of_vec_of_dates_from_df(df: DataFrame) -> Result<Vec<Vec<String>>, Box<dyn Error>> {
// let real_dates_column = df.column("real_dates")?.clone();
// let series = real_dates_column.as_series().unwrap().clone();
// let rdt = series.list()?.clone();
// let rdt = rdt
// .into_iter()
// .filter_map(|opt| opt)
// .collect::<Vec<Series>>();
// let rdt = rdt
// .iter()
// .map(|s| {
// s.str()
// .unwrap()
// .into_iter()
// .filter_map(|opt| opt.map(|s| s.to_string()))
// .collect::<Vec<String>>()
// })
// .collect::<Vec<Vec<String>>>();
// Ok(rdt)
// }
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_convert_dates_list_to_date_ranges() {
let all_dates = vec![
"2023-01-01".to_string(),
"2023-01-02".to_string(),
"2023-01-03".to_string(),
"2023-01-04".to_string(),
"2023-01-05".to_string(),
"2023-01-06".to_string(),
];
let blacklist = vec![
"2023-01-02".to_string(),
"2023-01-03".to_string(),
"2023-01-05".to_string(),
];
let result = convert_dates_list_to_date_ranges(blacklist, all_dates);
// Expect two ranges:
// range 0 => ("2023-01-02", "2023-01-03")
// range 1 => ("2023-01-05", "2023-01-05")
assert_eq!(
result["0"],
("2023-01-02".to_string(), "2023-01-03".to_string())
);
assert_eq!(
result["1"],
("2023-01-05".to_string(), "2023-01-05".to_string())
);
}
}

View File

@@ -17,15 +17,14 @@ use std::error::Error;
pub fn check_quantamental_dataframe(df: &DataFrame) -> Result<(), Box<dyn Error>> {
let expected_cols = ["real_date", "cid", "xcat"];
let expected_dtype = [DataType::Date, DataType::String, DataType::String];
let err = "Quantamental DataFrame must have at least 4 columns: 'real_date', 'cid', 'xcat' and one or more metrics.";
for (col, dtype) in expected_cols.iter().zip(expected_dtype.iter()) {
let col = df.column(col);
if col.is_err() {
return Err(format!("{} Column {:?} not found", err, col).into());
return Err(format!("Column {:?} not found", col).into());
}
let col = col?;
if col.dtype() != dtype {
return Err(format!("{} Column {:?} has wrong dtype", err, col).into());
return Err(format!("Column {:?} has wrong dtype", col).into());
}
}
Ok(())

View File

@@ -1,12 +1,11 @@
pub mod blacklist;
pub mod core;
pub mod load;
pub mod pivots;
pub mod reduce_df;
pub mod update_df;
pub mod load;
pub mod reduce_df;
pub mod pivots;
// Re-export submodules for easier access
pub use core::*;
pub use update_df::*;
pub use load::*;
pub use reduce_df::*;
pub use update_df::*;

View File

@@ -30,12 +30,12 @@ pub fn reduce_dataframe(
let df_size = df.shape();
let mut new_df = df.clone();
let ticker_col = get_ticker_column_for_quantamental_dataframe(&new_df)?;
let ticker_col: Column = get_ticker_column_for_quantamental_dataframe(&new_df)?;
// if cids is not provided, get all unique cids
let u_cids = get_unique_cids(&new_df)?;
let u_xcats = get_unique_xcats(&new_df)?;
let u_tickers = _get_unique_strs_from_str_column_object(&ticker_col)?;
let u_cids: Vec<String> = get_unique_cids(&new_df)?;
let u_xcats: Vec<String> = get_unique_xcats(&new_df)?;
let u_tickers: Vec<String> = _get_unique_strs_from_str_column_object(&ticker_col)?;
let cids_vec = cids.unwrap_or_else(|| u_cids.clone());
let specified_cids: Vec<&str> = cids_vec.iter().map(AsRef::as_ref).collect();