mirror of
https://github.com/Magnus167/msyrs.git
synced 2025-11-19 15:46:12 +00:00
Compare commits
35 Commits
c0f7299643
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1275e7c2c9 | ||
|
|
0e4d58a9d8 | ||
|
|
d16764944b | ||
|
|
80603aa951 | ||
|
|
fb2efa99ac | ||
|
|
178de83d1a | ||
|
|
ee531deb7a | ||
|
|
0443906cc9 | ||
|
|
f0a9242d10 | ||
|
|
df22667d63 | ||
|
|
7d4c198067 | ||
|
|
25192e425d | ||
|
|
e602b8b2b4 | ||
|
|
5d2ff3b88d | ||
|
|
3d2afa01a8 | ||
|
|
ed9d5d01a2 | ||
|
|
dda6e3e12f | ||
|
|
5ef2c7e6c7 | ||
|
|
5c3862c297 | ||
|
|
3559a90ad2 | ||
|
|
b7368a366e | ||
|
|
24a4176e17 | ||
|
|
165e1c19e4 | ||
|
|
fefe849394 | ||
| 4e4d1c6625 | |||
|
|
cfbd54be7a | ||
|
|
a4645dbc93 | ||
|
|
658eb0d121 | ||
|
|
93f88ab537 | ||
|
|
39d1a1b632 | ||
|
|
28ff5c95cf | ||
|
|
8343e93b46 | ||
|
|
328fb24509 | ||
|
|
3f8a2b7c0c | ||
|
|
c70dc11abd |
1
.gitattributes
vendored
Normal file
1
.gitattributes
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
notebooks/** linguist-vendored
|
||||||
37
Dockerfile
Normal file
37
Dockerfile
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
FROM debian:bookworm-slim AS base
|
||||||
|
|
||||||
|
RUN apt update && \
|
||||||
|
apt install -y build-essential curl git python3 pkg-config libssl-dev \
|
||||||
|
&& apt clean \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
# Get Rust
|
||||||
|
RUN curl https://sh.rustup.rs -sSf | bash -s -- -y
|
||||||
|
RUN echo 'source $HOME/.cargo/env' >> $HOME/.bashrc
|
||||||
|
|
||||||
|
WORKDIR /app
|
||||||
|
|
||||||
|
ADD https://astral.sh/uv/install.sh /uv-installer.sh
|
||||||
|
RUN sh /uv-installer.sh && rm /uv-installer.sh
|
||||||
|
|
||||||
|
ENV PATH="/root/.local/bin/:$PATH"
|
||||||
|
|
||||||
|
# RUN python3 -m pip install uv --break-system-packages
|
||||||
|
RUN uv venv
|
||||||
|
# /app/.venv/bin/python
|
||||||
|
ENV PATH="/app/.venv/bin/:$PATH"
|
||||||
|
RUN uv pip install "maturin[patchelf]"
|
||||||
|
|
||||||
|
ADD src /app/src
|
||||||
|
ADD Cargo.toml /app/Cargo.toml
|
||||||
|
ADD README.md /app/README.md
|
||||||
|
ADD pyproject.toml /app/pyproject.toml
|
||||||
|
|
||||||
|
RUN cp ./src/msyrs.pyi ./msyrs.pyi
|
||||||
|
|
||||||
|
VOLUME /app/build
|
||||||
|
|
||||||
|
# CMD ["/bin/bash"]
|
||||||
|
# CMD ["maturin", "build", "--release", "--out", "/app/build"]
|
||||||
|
# RUN maturin build --release --out ./build
|
||||||
|
# RUN uv build --sdist --wheel --out-dir ./build
|
||||||
@@ -4,7 +4,14 @@ A Rust implementation of the [Macrosynergy Python Package](https://github.com/ma
|
|||||||
|
|
||||||
## Build and install the Python package
|
## Build and install the Python package
|
||||||
|
|
||||||
|
Install Cargo:
|
||||||
|
|
||||||
|
```
|
||||||
|
curl https://sh.rustup.rs -sSf | sh
|
||||||
|
```
|
||||||
|
|
||||||
Create a virtual environment:
|
Create a virtual environment:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
python -m venv .venv
|
python -m venv .venv
|
||||||
```
|
```
|
||||||
@@ -21,6 +28,7 @@ uv pip install .
|
|||||||
```
|
```
|
||||||
|
|
||||||
Install from uv + Git (if authenticated):
|
Install from uv + Git (if authenticated):
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
uv pip install git+https://github.com/Magnus167/msyrs@main
|
uv pip install git+https://github.com/Magnus167/msyrs@main
|
||||||
```
|
```
|
||||||
|
|||||||
240
notebooks/funcwise/basic-utils.ipynb
vendored
240
notebooks/funcwise/basic-utils.ipynb
vendored
File diff suppressed because one or more lines are too long
360
notebooks/funcwise/bdate_range_util.ipynb
vendored
Normal file
360
notebooks/funcwise/bdate_range_util.ipynb
vendored
Normal file
@@ -0,0 +1,360 @@
|
|||||||
|
{
|
||||||
|
"cells": [
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 1,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"# ! uv pip install E:\\Work\\ruzt\\msyrs --upgrade"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Import Python packages\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 2,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import macrosynergy\n",
|
||||||
|
"import pandas as pd\n",
|
||||||
|
"import numpy as np\n",
|
||||||
|
"import polars as pl\n",
|
||||||
|
"import os\n",
|
||||||
|
"import time\n",
|
||||||
|
"\n",
|
||||||
|
"from macrosynergy.panel import view_timelines\n",
|
||||||
|
"from macrosynergy.management.types import QuantamentalDataFrame\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "markdown",
|
||||||
|
"metadata": {},
|
||||||
|
"source": [
|
||||||
|
"### Import Python bindings - `msyrs`\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 3,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [],
|
||||||
|
"source": [
|
||||||
|
"import msyrs"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 4,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"data": {
|
||||||
|
"text/html": [
|
||||||
|
"<div>\n",
|
||||||
|
"<style scoped>\n",
|
||||||
|
" .dataframe tbody tr th:only-of-type {\n",
|
||||||
|
" vertical-align: middle;\n",
|
||||||
|
" }\n",
|
||||||
|
"\n",
|
||||||
|
" .dataframe tbody tr th {\n",
|
||||||
|
" vertical-align: top;\n",
|
||||||
|
" }\n",
|
||||||
|
"\n",
|
||||||
|
" .dataframe thead th {\n",
|
||||||
|
" text-align: right;\n",
|
||||||
|
" }\n",
|
||||||
|
"</style>\n",
|
||||||
|
"<table border=\"1\" class=\"dataframe\">\n",
|
||||||
|
" <thead>\n",
|
||||||
|
" <tr style=\"text-align: right;\">\n",
|
||||||
|
" <th></th>\n",
|
||||||
|
" <th>bdates</th>\n",
|
||||||
|
" <th>0</th>\n",
|
||||||
|
" </tr>\n",
|
||||||
|
" </thead>\n",
|
||||||
|
" <tbody>\n",
|
||||||
|
" <tr>\n",
|
||||||
|
" <th>0</th>\n",
|
||||||
|
" <td>2000-01-03</td>\n",
|
||||||
|
" <td>2000-01-03</td>\n",
|
||||||
|
" </tr>\n",
|
||||||
|
" <tr>\n",
|
||||||
|
" <th>1</th>\n",
|
||||||
|
" <td>2000-01-10</td>\n",
|
||||||
|
" <td>2000-01-10</td>\n",
|
||||||
|
" </tr>\n",
|
||||||
|
" <tr>\n",
|
||||||
|
" <th>2</th>\n",
|
||||||
|
" <td>2000-01-17</td>\n",
|
||||||
|
" <td>2000-01-17</td>\n",
|
||||||
|
" </tr>\n",
|
||||||
|
" <tr>\n",
|
||||||
|
" <th>3</th>\n",
|
||||||
|
" <td>2000-01-24</td>\n",
|
||||||
|
" <td>2000-01-24</td>\n",
|
||||||
|
" </tr>\n",
|
||||||
|
" <tr>\n",
|
||||||
|
" <th>4</th>\n",
|
||||||
|
" <td>2000-01-31</td>\n",
|
||||||
|
" <td>2000-01-31</td>\n",
|
||||||
|
" </tr>\n",
|
||||||
|
" <tr>\n",
|
||||||
|
" <th>...</th>\n",
|
||||||
|
" <td>...</td>\n",
|
||||||
|
" <td>...</td>\n",
|
||||||
|
" </tr>\n",
|
||||||
|
" <tr>\n",
|
||||||
|
" <th>1056</th>\n",
|
||||||
|
" <td>2020-03-30</td>\n",
|
||||||
|
" <td>2020-03-30</td>\n",
|
||||||
|
" </tr>\n",
|
||||||
|
" <tr>\n",
|
||||||
|
" <th>1057</th>\n",
|
||||||
|
" <td>2020-04-06</td>\n",
|
||||||
|
" <td>2020-04-06</td>\n",
|
||||||
|
" </tr>\n",
|
||||||
|
" <tr>\n",
|
||||||
|
" <th>1058</th>\n",
|
||||||
|
" <td>2020-04-13</td>\n",
|
||||||
|
" <td>2020-04-13</td>\n",
|
||||||
|
" </tr>\n",
|
||||||
|
" <tr>\n",
|
||||||
|
" <th>1059</th>\n",
|
||||||
|
" <td>2020-04-20</td>\n",
|
||||||
|
" <td>2020-04-20</td>\n",
|
||||||
|
" </tr>\n",
|
||||||
|
" <tr>\n",
|
||||||
|
" <th>1060</th>\n",
|
||||||
|
" <td>2020-04-27</td>\n",
|
||||||
|
" <td>2020-04-27</td>\n",
|
||||||
|
" </tr>\n",
|
||||||
|
" </tbody>\n",
|
||||||
|
"</table>\n",
|
||||||
|
"<p>1061 rows × 2 columns</p>\n",
|
||||||
|
"</div>"
|
||||||
|
],
|
||||||
|
"text/plain": [
|
||||||
|
" bdates 0\n",
|
||||||
|
"0 2000-01-03 2000-01-03\n",
|
||||||
|
"1 2000-01-10 2000-01-10\n",
|
||||||
|
"2 2000-01-17 2000-01-17\n",
|
||||||
|
"3 2000-01-24 2000-01-24\n",
|
||||||
|
"4 2000-01-31 2000-01-31\n",
|
||||||
|
"... ... ...\n",
|
||||||
|
"1056 2020-03-30 2020-03-30\n",
|
||||||
|
"1057 2020-04-06 2020-04-06\n",
|
||||||
|
"1058 2020-04-13 2020-04-13\n",
|
||||||
|
"1059 2020-04-20 2020-04-20\n",
|
||||||
|
"1060 2020-04-27 2020-04-27\n",
|
||||||
|
"\n",
|
||||||
|
"[1061 rows x 2 columns]"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"execution_count": 4,
|
||||||
|
"metadata": {},
|
||||||
|
"output_type": "execute_result"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"x = msyrs.utils.get_bdates_series_default_opt(start_date='2000-01-01', end_date='2020-05-01', freq='W').to_pandas()\n",
|
||||||
|
"y = pd.Series(pd.bdate_range(start='2000-01-01', end='2020-05-01', freq='W-MON'))\n",
|
||||||
|
"\n",
|
||||||
|
"pd.concat([x, y], axis=1)\n",
|
||||||
|
"\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 5,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"Results for M\t & \tBMS\t are exactly the same\n",
|
||||||
|
"Results for Q\t & \tBQS\t are exactly the same\n",
|
||||||
|
"Results for W\t & \tW-MON\t are exactly the same\n",
|
||||||
|
"Results for WF\t & \tW-FRI\t are exactly the same\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"for rs_freq, pd_freq in [('M', 'BMS'), ('Q', 'BQS'), ('W', 'W-MON'), ('WF', 'W-FRI')]:\n",
|
||||||
|
"\n",
|
||||||
|
"\n",
|
||||||
|
" x = msyrs.utils.get_bdates_series_default_opt(start_date='2000-01-01', end_date='2020-05-01', freq=rs_freq).to_pandas()\n",
|
||||||
|
" y = pd.Series(pd.bdate_range(start='2000-01-01', end='2020-05-01', freq=pd_freq))\n",
|
||||||
|
"\n",
|
||||||
|
" e = x == y\n",
|
||||||
|
" res = e.all()\n",
|
||||||
|
" non_matching_df = pd.concat([x[~e], y[~e]], axis=1)\n",
|
||||||
|
" assert res, f\"Results for {rs_freq}\\t and \\t{pd_freq}\\t are not the same\\n{non_matching_df}\"\n",
|
||||||
|
" print(f\"Results for {rs_freq}\\t & \\t{pd_freq}\\t are exactly the same\")\n"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 6,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"23.5 μs ± 1.02 μs per loop (mean ± std. dev. of 7 runs, 10,000 loops each)\n",
|
||||||
|
"67.4 μs ± 979 ns per loop (mean ± std. dev. of 7 runs, 10,000 loops each)\n",
|
||||||
|
"1.97 ms ± 57.3 μs per loop (mean ± std. dev. of 7 runs, 1,000 loops each)\n",
|
||||||
|
"4.65 ms ± 170 μs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n",
|
||||||
|
"28.3 ms ± 898 μs per loop (mean ± std. dev. of 7 runs, 10 loops each)\n",
|
||||||
|
"93.8 ms ± 2.02 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"%timeit msyrs.utils.get_bdates_series_default_opt(start_date='2000-01-01', end_date='2020-05-01', freq='D')\n",
|
||||||
|
"%timeit msyrs.utils.get_bdates_series_default_opt(start_date='1971-01-01', end_date='2040-05-01', freq='D')\n",
|
||||||
|
"%timeit msyrs.utils.get_bdates_series_default_pl(start_date='2000-01-01', end_date='2020-05-01', freq='D')\n",
|
||||||
|
"%timeit msyrs.utils.get_bdates_series_default_pl(start_date='1971-01-01', end_date='2040-05-01', freq='D')\n",
|
||||||
|
"%timeit pd.bdate_range(start='2000-01-01', end='2020-05-01', freq='B')\n",
|
||||||
|
"%timeit pd.bdate_range(start='1971-01-01', end='2040-05-01', freq='B')"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 7,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"7.95 μs ± 146 ns per loop (mean ± std. dev. of 7 runs, 100,000 loops each)\n",
|
||||||
|
"17.9 μs ± 108 ns per loop (mean ± std. dev. of 7 runs, 100,000 loops each)\n",
|
||||||
|
"1.73 ms ± 20.8 μs per loop (mean ± std. dev. of 7 runs, 1,000 loops each)\n",
|
||||||
|
"4 ms ± 69.3 μs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n",
|
||||||
|
"5.69 ms ± 139 μs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n",
|
||||||
|
"19.1 ms ± 268 μs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"%timeit msyrs.utils.get_bdates_series_default_opt(start_date='2000-01-01', end_date='2020-05-01', freq='WF')\n",
|
||||||
|
"%timeit msyrs.utils.get_bdates_series_default_opt(start_date='1971-01-01', end_date='2040-05-01', freq='WF')\n",
|
||||||
|
"%timeit msyrs.utils.get_bdates_series_default_pl(start_date='2000-01-01', end_date='2020-05-01', freq='WF')\n",
|
||||||
|
"%timeit msyrs.utils.get_bdates_series_default_pl(start_date='1971-01-01', end_date='2040-05-01', freq='WF')\n",
|
||||||
|
"%timeit pd.bdate_range(start='2000-01-01', end='2020-05-01', freq='W-FRI')\n",
|
||||||
|
"%timeit pd.bdate_range(start='1971-01-01', end='2040-05-01', freq='W-FRI')"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 8,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"6.9 μs ± 126 ns per loop (mean ± std. dev. of 7 runs, 100,000 loops each)\n",
|
||||||
|
"13.1 μs ± 93.3 ns per loop (mean ± std. dev. of 7 runs, 100,000 loops each)\n",
|
||||||
|
"1.73 ms ± 29.3 μs per loop (mean ± std. dev. of 7 runs, 1,000 loops each)\n",
|
||||||
|
"4.2 ms ± 81.5 μs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n",
|
||||||
|
"931 μs ± 14.2 μs per loop (mean ± std. dev. of 7 runs, 1,000 loops each)\n",
|
||||||
|
"3.05 ms ± 47.5 μs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"%timeit msyrs.utils.get_bdates_series_default_opt(start_date='2000-01-01', end_date='2020-05-01', freq='ME')\n",
|
||||||
|
"%timeit msyrs.utils.get_bdates_series_default_opt(start_date='1971-01-01', end_date='2040-05-01', freq='ME')\n",
|
||||||
|
"%timeit msyrs.utils.get_bdates_series_default_pl(start_date='2000-01-01', end_date='2020-05-01', freq='ME')\n",
|
||||||
|
"%timeit msyrs.utils.get_bdates_series_default_pl(start_date='1971-01-01', end_date='2040-05-01', freq='ME')\n",
|
||||||
|
"%timeit pd.bdate_range(start='2000-01-01', end='2020-05-01', freq='BME')\n",
|
||||||
|
"%timeit pd.bdate_range(start='1971-01-01', end='2040-05-01', freq='BME')"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 9,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"3.65 μs ± 69.1 ns per loop (mean ± std. dev. of 7 runs, 100,000 loops each)\n",
|
||||||
|
"4.78 μs ± 38.7 ns per loop (mean ± std. dev. of 7 runs, 100,000 loops each)\n",
|
||||||
|
"1.73 ms ± 122 μs per loop (mean ± std. dev. of 7 runs, 1,000 loops each)\n",
|
||||||
|
"4.16 ms ± 286 μs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n",
|
||||||
|
"340 μs ± 11.3 μs per loop (mean ± std. dev. of 7 runs, 1,000 loops each)\n",
|
||||||
|
"1.1 ms ± 11.5 μs per loop (mean ± std. dev. of 7 runs, 1,000 loops each)\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"%timeit msyrs.utils.get_bdates_series_default_opt(start_date='2000-01-01', end_date='2020-05-01', freq='Q')\n",
|
||||||
|
"%timeit msyrs.utils.get_bdates_series_default_opt(start_date='1971-01-01', end_date='2040-05-01', freq='Q')\n",
|
||||||
|
"%timeit msyrs.utils.get_bdates_series_default_pl(start_date='2000-01-01', end_date='2020-05-01', freq='Q')\n",
|
||||||
|
"%timeit msyrs.utils.get_bdates_series_default_pl(start_date='1971-01-01', end_date='2040-05-01', freq='Q')\n",
|
||||||
|
"%timeit pd.bdate_range(start='2000-01-01', end='2020-05-01', freq='BQS')\n",
|
||||||
|
"%timeit pd.bdate_range(start='1971-01-01', end='2040-05-01', freq='BQS')"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cell_type": "code",
|
||||||
|
"execution_count": 10,
|
||||||
|
"metadata": {},
|
||||||
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stdout",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"3.21 μs ± 83.4 ns per loop (mean ± std. dev. of 7 runs, 100,000 loops each)\n",
|
||||||
|
"3.66 μs ± 198 ns per loop (mean ± std. dev. of 7 runs, 100,000 loops each)\n",
|
||||||
|
"2.67 ms ± 459 μs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n",
|
||||||
|
"3.71 ms ± 143 μs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n",
|
||||||
|
"98.7 μs ± 1.47 μs per loop (mean ± std. dev. of 7 runs, 10,000 loops each)\n",
|
||||||
|
"289 μs ± 15.3 μs per loop (mean ± std. dev. of 7 runs, 1,000 loops each)\n"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"source": [
|
||||||
|
"%timeit msyrs.utils.get_bdates_series_default_opt(start_date='2000-01-01', end_date='2020-05-01', freq='YE')\n",
|
||||||
|
"%timeit msyrs.utils.get_bdates_series_default_opt(start_date='1971-01-01', end_date='2040-05-01', freq='YE')\n",
|
||||||
|
"%timeit msyrs.utils.get_bdates_series_default_pl(start_date='2000-01-01', end_date='2020-05-01', freq='YE')\n",
|
||||||
|
"%timeit msyrs.utils.get_bdates_series_default_pl(start_date='1971-01-01', end_date='2040-05-01', freq='YE')\n",
|
||||||
|
"%timeit pd.bdate_range(start='2000-01-01', end='2020-05-01', freq='BYE')\n",
|
||||||
|
"%timeit pd.bdate_range(start='1971-01-01', end='2040-05-01', freq='BYE')"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"metadata": {
|
||||||
|
"kernelspec": {
|
||||||
|
"display_name": ".venv",
|
||||||
|
"language": "python",
|
||||||
|
"name": "python3"
|
||||||
|
},
|
||||||
|
"language_info": {
|
||||||
|
"codemirror_mode": {
|
||||||
|
"name": "ipython",
|
||||||
|
"version": 3
|
||||||
|
},
|
||||||
|
"file_extension": ".py",
|
||||||
|
"mimetype": "text/x-python",
|
||||||
|
"name": "python",
|
||||||
|
"nbconvert_exporter": "python",
|
||||||
|
"pygments_lexer": "ipython3",
|
||||||
|
"version": "3.12.4"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nbformat": 4,
|
||||||
|
"nbformat_minor": 4
|
||||||
|
}
|
||||||
260
notebooks/funcwise/linear_composites.ipynb
vendored
260
notebooks/funcwise/linear_composites.ipynb
vendored
File diff suppressed because one or more lines are too long
@@ -5,7 +5,7 @@ build-backend = "maturin"
|
|||||||
[project]
|
[project]
|
||||||
name = "msyrs"
|
name = "msyrs"
|
||||||
version = "0.0.1"
|
version = "0.0.1"
|
||||||
requires-python = ">=3.7"
|
requires-python = ">=3.8"
|
||||||
|
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"macrosynergy>=1.2.0",
|
"macrosynergy>=1.2.0",
|
||||||
|
|||||||
33
scripts/unix/build.sh
Normal file
33
scripts/unix/build.sh
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
# Exit immediately if a command exits with a non-zero status
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# Run "maturin --help". If it fails, print an error message and exit.
|
||||||
|
if ! maturin --help > /dev/null 2>&1; then
|
||||||
|
echo "Failed to run maturin --help" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Delete any existing build directory and create a new one.
|
||||||
|
rm -rf ./build
|
||||||
|
mkdir -p ./build
|
||||||
|
|
||||||
|
# Copy ./src/msyrs.pyi to ./msyrs.pyi.
|
||||||
|
cp ./src/msyrs.pyi ./msyrs.pyi
|
||||||
|
|
||||||
|
# Build using maturin.
|
||||||
|
maturin build --release --sdist --out ./build/
|
||||||
|
|
||||||
|
# Get the first wheel file found in the build directory.
|
||||||
|
whl_file=$(ls ./build/*.whl 2>/dev/null | head -n 1)
|
||||||
|
if [ -z "$whl_file" ]; then
|
||||||
|
echo "No wheel file found in ./build" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Rename the wheel file from .whl to .zip.
|
||||||
|
base_name="${whl_file%.whl}"
|
||||||
|
mv "$whl_file" "${base_name}.zip"
|
||||||
|
|
||||||
|
# Delete the temporary .pyi file.
|
||||||
|
rm ./msyrs.pyi
|
||||||
20
scripts/unix/install.sh
Normal file
20
scripts/unix/install.sh
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# Ensure maturin is installed. For example, you can install it via:
|
||||||
|
# pip install maturin
|
||||||
|
|
||||||
|
# Run "maturin --help". If it fails, print an error message and exit.
|
||||||
|
if ! maturin --help > /dev/null 2>&1; then
|
||||||
|
echo "Failed to run maturin --help" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Copy ./src/msyrs.pyi to the current directory as msyrs.pyi
|
||||||
|
cp ./src/msyrs.pyi ./msyrs.pyi
|
||||||
|
|
||||||
|
# Run maturin develop in release mode.
|
||||||
|
maturin develop --release
|
||||||
|
|
||||||
|
# Delete the temporary msyrs.pyi file.
|
||||||
|
rm ./msyrs.pyi
|
||||||
@@ -68,7 +68,7 @@ pub fn get_period_indices_hv(dfw: PyDataFrame, est_freq: &str) -> PyResult<Vec<u
|
|||||||
cids,
|
cids,
|
||||||
weights = None,
|
weights = None,
|
||||||
signs = None,
|
signs = None,
|
||||||
weight_xcats = None,
|
weight_xcat = None,
|
||||||
normalize_weights = false,
|
normalize_weights = false,
|
||||||
start = None,
|
start = None,
|
||||||
end = None,
|
end = None,
|
||||||
@@ -84,7 +84,7 @@ pub fn linear_composite(
|
|||||||
cids: Vec<String>,
|
cids: Vec<String>,
|
||||||
weights: Option<Vec<f64>>,
|
weights: Option<Vec<f64>>,
|
||||||
signs: Option<Vec<f64>>,
|
signs: Option<Vec<f64>>,
|
||||||
weight_xcats: Option<Vec<String>>,
|
weight_xcat: Option<String>,
|
||||||
normalize_weights: bool,
|
normalize_weights: bool,
|
||||||
start: Option<String>,
|
start: Option<String>,
|
||||||
end: Option<String>,
|
end: Option<String>,
|
||||||
@@ -101,7 +101,7 @@ pub fn linear_composite(
|
|||||||
cids,
|
cids,
|
||||||
weights,
|
weights,
|
||||||
signs,
|
signs,
|
||||||
weight_xcats,
|
weight_xcat,
|
||||||
normalize_weights,
|
normalize_weights,
|
||||||
start,
|
start,
|
||||||
end,
|
end,
|
||||||
|
|||||||
@@ -1,22 +1,62 @@
|
|||||||
use pyo3::prelude::*;
|
use pyo3::{prelude::*, types::PyDict};
|
||||||
use pyo3_polars::{PyDataFrame, PySeries};
|
use pyo3_polars::{PyDataFrame, PySeries};
|
||||||
|
|
||||||
/// Python wrapper for [`crate::utils::qdf`] module.
|
/// Python wrapper for [`crate::utils::qdf`] module.
|
||||||
#[allow(deprecated)]
|
#[allow(deprecated)]
|
||||||
#[pymodule]
|
#[pymodule]
|
||||||
pub fn utils(_py: Python, m: &PyModule) -> PyResult<()> {
|
pub fn utils(_py: Python, m: &PyModule) -> PyResult<()> {
|
||||||
m.add_function(wrap_pyfunction!(get_bdates_series_default, m)?)?;
|
m.add_function(wrap_pyfunction!(get_bdates_series_default_pl, m)?)?;
|
||||||
|
m.add_function(wrap_pyfunction!(get_bdates_series_default_opt, m)?)?;
|
||||||
|
m.add_function(wrap_pyfunction!(create_blacklist_from_qdf, m)?)?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[pyfunction]
|
#[pyfunction]
|
||||||
pub fn get_bdates_series_default(
|
pub fn get_bdates_series_default_pl(
|
||||||
start_date: String,
|
start_date: String,
|
||||||
end_date: String,
|
end_date: String,
|
||||||
freq: Option<String>,
|
freq: Option<String>,
|
||||||
) -> PyResult<PySeries> {
|
) -> PyResult<PySeries> {
|
||||||
Ok(PySeries(
|
Ok(PySeries(
|
||||||
crate::utils::dateutils::get_bdates_series_default(start_date, end_date, freq)
|
crate::utils::dateutils::get_bdates_series_default_pl(start_date, end_date, freq)
|
||||||
.map_err(|e| PyErr::new::<pyo3::exceptions::PyValueError, _>(format!("{}", e)))?,
|
.map_err(|e| PyErr::new::<pyo3::exceptions::PyValueError, _>(format!("{}", e)))?,
|
||||||
))
|
))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[pyfunction]
|
||||||
|
pub fn get_bdates_series_default_opt(
|
||||||
|
start_date: String,
|
||||||
|
end_date: String,
|
||||||
|
freq: Option<String>,
|
||||||
|
) -> PyResult<PySeries> {
|
||||||
|
Ok(PySeries(
|
||||||
|
crate::utils::dateutils::get_bdates_series_default_opt(start_date, end_date, freq)
|
||||||
|
.map_err(|e| PyErr::new::<pyo3::exceptions::PyValueError, _>(format!("{}", e)))?,
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[allow(deprecated)]
|
||||||
|
#[pyfunction(signature = (df, group_by_cid=None, blacklist_name=None, metrics=None))]
|
||||||
|
pub fn create_blacklist_from_qdf(
|
||||||
|
df: PyDataFrame,
|
||||||
|
group_by_cid: Option<bool>,
|
||||||
|
blacklist_name: Option<String>,
|
||||||
|
metrics: Option<Vec<String>>,
|
||||||
|
) -> PyResult<PyObject> {
|
||||||
|
let result = crate::utils::qdf::blacklist::create_blacklist_from_qdf(
|
||||||
|
&df.into(),
|
||||||
|
group_by_cid,
|
||||||
|
blacklist_name,
|
||||||
|
metrics,
|
||||||
|
)
|
||||||
|
.map_err(|e| PyErr::new::<pyo3::exceptions::PyValueError, _>(format!("{}", e)))?;
|
||||||
|
Python::with_gil(|py| {
|
||||||
|
let dict = PyDict::new(py);
|
||||||
|
// for (key, (start_date, end_date)) in result {
|
||||||
|
// dict.set_item(key, (start_date, end_date))
|
||||||
|
for (key, dates) in result {
|
||||||
|
dict.set_item(key, dates).map_err(|e| PyErr::from(e))?;
|
||||||
|
}
|
||||||
|
Ok(dict.into())
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|||||||
@@ -58,7 +58,7 @@ fn all_jpmaq_expressions(expressions: Vec<String>) -> bool {
|
|||||||
///
|
///
|
||||||
/// Example Usage:
|
/// Example Usage:
|
||||||
///
|
///
|
||||||
/// ```rust
|
/// ```ignore
|
||||||
/// use msyrs::download::jpmaqsdownload::JPMaQSDownloadGetIndicatorArgs;
|
/// use msyrs::download::jpmaqsdownload::JPMaQSDownloadGetIndicatorArgs;
|
||||||
/// use msyrs::download::jpmaqsdownload::JPMaQSDownload;
|
/// use msyrs::download::jpmaqsdownload::JPMaQSDownload;
|
||||||
///
|
///
|
||||||
@@ -102,7 +102,7 @@ impl Default for JPMaQSDownloadGetIndicatorArgs {
|
|||||||
/// Struct for downloading data from the JPMaQS data from JPMorgan DataQuery API.
|
/// Struct for downloading data from the JPMaQS data from JPMorgan DataQuery API.
|
||||||
///
|
///
|
||||||
/// ## Example Usage
|
/// ## Example Usage
|
||||||
/// ```rust
|
/// ```ignore
|
||||||
/// use msyrs::download::jpmaqsdownload::JPMaQSDownload;
|
/// use msyrs::download::jpmaqsdownload::JPMaQSDownload;
|
||||||
/// use msyrs::download::jpmaqsdownload::JPMaQSDownloadGetIndicatorArgs;
|
/// use msyrs::download::jpmaqsdownload::JPMaQSDownloadGetIndicatorArgs;
|
||||||
/// use polars::prelude::*;
|
/// use polars::prelude::*;
|
||||||
@@ -277,7 +277,7 @@ impl JPMaQSDownload {
|
|||||||
///
|
///
|
||||||
/// Usage:
|
/// Usage:
|
||||||
///
|
///
|
||||||
/// ```rust
|
/// ```ignore
|
||||||
/// use msyrs::download::jpmaqsdownload::JPMaQSDownload;
|
/// use msyrs::download::jpmaqsdownload::JPMaQSDownload;
|
||||||
/// use msyrs::download::jpmaqsdownload::JPMaQSDownloadGetIndicatorArgs;
|
/// use msyrs::download::jpmaqsdownload::JPMaQSDownloadGetIndicatorArgs;
|
||||||
/// let mut jpamqs_download = JPMaQSDownload::default();
|
/// let mut jpamqs_download = JPMaQSDownload::default();
|
||||||
|
|||||||
@@ -51,6 +51,10 @@ class panel:
|
|||||||
def linear_composite(*args, **kwargs) -> DataFrame: ...
|
def linear_composite(*args, **kwargs) -> DataFrame: ...
|
||||||
|
|
||||||
class utils:
|
class utils:
|
||||||
__all__ = ["get_bdates_series_default"]
|
__all__ = ["get_bdates_series_default", "get_bdates_series_default_opt"]
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def get_bdates_series_default(*args, **kwargs) -> Series: ...
|
def get_bdates_series_default_pl(*args, **kwargs) -> Series: ...
|
||||||
|
@staticmethod
|
||||||
|
def get_bdates_series_default_opt(*args, **kwargs) -> Series: ...
|
||||||
|
@staticmethod
|
||||||
|
def create_blacklist_from_qdf(*args, **kwargs) -> dict: ...
|
||||||
@@ -1,6 +1,6 @@
|
|||||||
use crate::utils::dateutils::{get_bdates_from_col, get_min_max_real_dates};
|
use crate::utils::dateutils::{get_bdates_from_col, get_min_max_real_dates};
|
||||||
use crate::utils::qdf::pivots::*;
|
use crate::utils::qdf::pivots::*;
|
||||||
use crate::utils::qdf::reduce_df::*;
|
use crate::utils::qdf::reduce_dataframe;
|
||||||
use chrono::NaiveDate;
|
use chrono::NaiveDate;
|
||||||
use ndarray::{s, Array, Array1, Zip};
|
use ndarray::{s, Array, Array1, Zip};
|
||||||
use polars::prelude::*;
|
use polars::prelude::*;
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
use crate::utils::qdf::check_quantamental_dataframe;
|
use crate::utils::qdf::check_quantamental_dataframe;
|
||||||
use crate::utils::qdf::pivots::*;
|
use crate::utils::qdf::pivots::{pivot_dataframe_by_ticker, pivot_wide_dataframe_to_qdf};
|
||||||
use crate::utils::qdf::reduce_df::*;
|
use crate::utils::qdf::reduce_df::reduce_dataframe;
|
||||||
use polars::prelude::*;
|
use polars::prelude::*;
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
const TOLERANCE: f64 = 1e-8;
|
const TOLERANCE: f64 = 1e-8;
|
||||||
@@ -108,14 +108,42 @@ fn _form_agg_nan_mask_series(nan_mask_dfw: &DataFrame) -> Result<Series, PolarsE
|
|||||||
Ok(combined.into_series())
|
Ok(combined.into_series())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Form the weights DataFrame
|
||||||
fn _form_agg_weights_dfw(
|
fn _form_agg_weights_dfw(
|
||||||
agg_weights_map: &HashMap<String, Vec<f64>>,
|
agg_weights_map: &HashMap<String, (WeightValue, f64)>,
|
||||||
data_dfw: DataFrame,
|
dfw: &DataFrame,
|
||||||
) -> Result<DataFrame, PolarsError> {
|
) -> Result<DataFrame, PolarsError> {
|
||||||
let mut weights_dfw = DataFrame::new(vec![])?;
|
let mut weights_dfw = DataFrame::new(vec![])?;
|
||||||
for (agg_targ, weight_signs) in agg_weights_map.iter() {
|
for (agg_targ, weight_signs) in agg_weights_map.iter() {
|
||||||
let wgt = weight_signs[0] * weight_signs[1];
|
// let wgt = weight_signs[0] * weight_signs[1];
|
||||||
let wgt_series = Series::new(agg_targ.into(), vec![wgt; data_dfw.height()]);
|
let wgt_series = match &weight_signs.0 {
|
||||||
|
WeightValue::F64(val) => {
|
||||||
|
let wgt = val * weight_signs.1;
|
||||||
|
Series::new(agg_targ.into(), vec![wgt; dfw.height()])
|
||||||
|
}
|
||||||
|
WeightValue::Str(vstr) => {
|
||||||
|
// vstr column from data_dfw, else raise wieght specification error
|
||||||
|
if !dfw.get_column_names().contains(&&PlSmallStr::from(vstr)) {
|
||||||
|
return Err(PolarsError::ComputeError(
|
||||||
|
format!(
|
||||||
|
"The column {} does not exist in the DataFrame. {:?}",
|
||||||
|
vstr, agg_weights_map
|
||||||
|
)
|
||||||
|
.into(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
let vstr_series = dfw.column(vstr)?;
|
||||||
|
let multiplied_series = vstr_series * weight_signs.1;
|
||||||
|
let mut multiplied_series =
|
||||||
|
multiplied_series.as_series().cloned().ok_or_else(|| {
|
||||||
|
PolarsError::ComputeError(
|
||||||
|
"Failed to convert multiplied_series to Series".into(),
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
multiplied_series.rename(agg_targ.into());
|
||||||
|
multiplied_series
|
||||||
|
}
|
||||||
|
};
|
||||||
weights_dfw.with_column(wgt_series)?;
|
weights_dfw.with_column(wgt_series)?;
|
||||||
}
|
}
|
||||||
Ok(weights_dfw)
|
Ok(weights_dfw)
|
||||||
@@ -143,14 +171,14 @@ fn perform_single_group_agg(
|
|||||||
dfw: &DataFrame,
|
dfw: &DataFrame,
|
||||||
agg_on: &String,
|
agg_on: &String,
|
||||||
agg_targs: &Vec<String>,
|
agg_targs: &Vec<String>,
|
||||||
agg_weights_map: &HashMap<String, Vec<f64>>,
|
agg_weights_map: &HashMap<String, (WeightValue, f64)>,
|
||||||
normalize_weights: bool,
|
normalize_weights: bool,
|
||||||
complete: bool,
|
complete: bool,
|
||||||
) -> Result<Column, PolarsError> {
|
) -> Result<Column, PolarsError> {
|
||||||
let data_dfw = _form_agg_data_dfw(dfw, agg_targs)?;
|
let data_dfw = _form_agg_data_dfw(dfw, agg_targs)?;
|
||||||
let nan_mask_dfw = _form_agg_nan_mask_dfw(&data_dfw)?;
|
let nan_mask_dfw = _form_agg_nan_mask_dfw(&data_dfw)?;
|
||||||
let nan_mask_series = _form_agg_nan_mask_series(&nan_mask_dfw)?;
|
let nan_mask_series = _form_agg_nan_mask_series(&nan_mask_dfw)?;
|
||||||
let weights_dfw = _form_agg_weights_dfw(agg_weights_map, data_dfw.clone())?;
|
let weights_dfw = _form_agg_weights_dfw(agg_weights_map, dfw)?;
|
||||||
let weights_dfw = match normalize_weights {
|
let weights_dfw = match normalize_weights {
|
||||||
true => normalize_weights_with_nan_mask(weights_dfw, nan_mask_dfw)?,
|
true => normalize_weights_with_nan_mask(weights_dfw, nan_mask_dfw)?,
|
||||||
false => weights_dfw,
|
false => weights_dfw,
|
||||||
@@ -192,7 +220,7 @@ fn perform_single_group_agg(
|
|||||||
fn perform_multiplication(
|
fn perform_multiplication(
|
||||||
dfw: &DataFrame,
|
dfw: &DataFrame,
|
||||||
mult_targets: &HashMap<String, Vec<String>>,
|
mult_targets: &HashMap<String, Vec<String>>,
|
||||||
weights_map: &HashMap<String, HashMap<String, Vec<f64>>>,
|
weights_map: &HashMap<String, HashMap<String, (WeightValue, f64)>>,
|
||||||
complete: bool,
|
complete: bool,
|
||||||
normalize_weights: bool,
|
normalize_weights: bool,
|
||||||
) -> Result<DataFrame, PolarsError> {
|
) -> Result<DataFrame, PolarsError> {
|
||||||
@@ -200,6 +228,7 @@ fn perform_multiplication(
|
|||||||
// let mut new_dfw = DataFrame::new(vec![real_date])?;
|
// let mut new_dfw = DataFrame::new(vec![real_date])?;
|
||||||
let mut new_dfw = DataFrame::new(vec![])?;
|
let mut new_dfw = DataFrame::new(vec![])?;
|
||||||
assert!(!mult_targets.is_empty(), "agg_targs is empty");
|
assert!(!mult_targets.is_empty(), "agg_targs is empty");
|
||||||
|
|
||||||
for (agg_on, agg_targs) in mult_targets.iter() {
|
for (agg_on, agg_targs) in mult_targets.iter() {
|
||||||
// perform_single_group_agg
|
// perform_single_group_agg
|
||||||
let cols_len = new_dfw.get_column_names().len();
|
let cols_len = new_dfw.get_column_names().len();
|
||||||
@@ -288,76 +317,122 @@ fn get_mul_targets(
|
|||||||
Ok(mul_targets)
|
Ok(mul_targets)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Builds a map of the shape:
|
||||||
|
/// `HashMap<String, HashMap<String, (WeightValue, f64)>>`
|
||||||
|
/// where only one of `weights` or `weight_xcats` can be provided.
|
||||||
|
/// If neither is provided, weights default to 1.0.
|
||||||
|
/// Each tuple is `(WeightValue, f64) = (weight, sign)`.
|
||||||
fn form_weights_and_signs_map(
|
fn form_weights_and_signs_map(
|
||||||
cids: Vec<String>,
|
cids: Vec<String>,
|
||||||
xcats: Vec<String>,
|
xcats: Vec<String>,
|
||||||
weights: Option<Vec<f64>>,
|
weights: Option<Vec<f64>>,
|
||||||
|
weight_xcat: Option<String>,
|
||||||
signs: Option<Vec<f64>>,
|
signs: Option<Vec<f64>>,
|
||||||
) -> Result<HashMap<String, HashMap<String, Vec<f64>>>, Box<dyn std::error::Error>> {
|
) -> Result<HashMap<String, HashMap<String, (WeightValue, f64)>>, Box<dyn std::error::Error>> {
|
||||||
let _agg_xcats_for_cid = agg_xcats_for_cid(cids.clone(), xcats.clone());
|
// For demonstration, we pretend to load or infer these from helpers:
|
||||||
|
let agg_xcats_for_cid = agg_xcats_for_cid(cids.clone(), xcats.clone());
|
||||||
let (agg_on, agg_targ) = get_agg_on_agg_targs(cids.clone(), xcats.clone());
|
let (agg_on, agg_targ) = get_agg_on_agg_targs(cids.clone(), xcats.clone());
|
||||||
|
|
||||||
// if weights are None, create a vector of 1s of the same length as agg_targ
|
// Determine if each weight option has non-empty values.
|
||||||
let weights = weights.unwrap_or(vec![1.0 / agg_targ.len() as f64; agg_targ.len()]);
|
let weights_provided = weights.as_ref().map_or(false, |v| !v.is_empty());
|
||||||
let signs = signs.unwrap_or(vec![1.0; agg_targ.len()]);
|
let weight_xcats_provided = weight_xcat.as_ref().map_or(false, |v| !v.is_empty());
|
||||||
|
|
||||||
// check that the lengths of weights and signs match the length of agg_targ
|
// Enforce that only one of weights or weight_xcats is specified.
|
||||||
check_weights_signs_lengths(
|
if weights_provided && weight_xcats_provided {
|
||||||
weights.clone(),
|
return Err("Only one of `weights` and `weight_xcats` may be specified.".into());
|
||||||
signs.clone(),
|
}
|
||||||
_agg_xcats_for_cid,
|
|
||||||
agg_targ.len(),
|
|
||||||
)?;
|
|
||||||
|
|
||||||
let mut weights_map = HashMap::new();
|
// 1) Build the "actual_weights" vector as WeightValue.
|
||||||
|
let actual_weights: Vec<WeightValue> = if weights_provided {
|
||||||
|
weights.unwrap().into_iter().map(WeightValue::F64).collect()
|
||||||
|
} else if weight_xcats_provided {
|
||||||
|
vec![WeightValue::Str(weight_xcat.unwrap()); agg_targ.len()]
|
||||||
|
} else {
|
||||||
|
// Default to numeric 1.0 if neither is provided
|
||||||
|
vec![WeightValue::F64(1.0); agg_targ.len()]
|
||||||
|
};
|
||||||
|
|
||||||
|
// 2) Build the "signs" vector; default to 1.0 if not provided
|
||||||
|
let signs = signs.unwrap_or_else(|| vec![1.0; agg_targ.len()]);
|
||||||
|
|
||||||
|
// 3) Optional: check lengths & zero values (only numeric weights).
|
||||||
|
check_weights_signs_lengths(&actual_weights, &signs, agg_xcats_for_cid, agg_targ.len())?;
|
||||||
|
|
||||||
|
// 4) Build the final nested HashMap
|
||||||
|
let mut weights_map: HashMap<String, HashMap<String, (WeightValue, f64)>> = HashMap::new();
|
||||||
|
|
||||||
for agg_o in agg_on {
|
for agg_o in agg_on {
|
||||||
let mut agg_t_map = HashMap::new();
|
let mut agg_t_map = HashMap::new();
|
||||||
for (i, agg_t) in agg_targ.iter().enumerate() {
|
for (i, agg_t) in agg_targ.iter().enumerate() {
|
||||||
let ticker = match _agg_xcats_for_cid {
|
// Format the ticker
|
||||||
true => format!("{}_{}", agg_o, agg_t),
|
let ticker = if agg_xcats_for_cid {
|
||||||
false => format!("{}_{}", agg_t, agg_o),
|
format!("{}_{}", agg_o, agg_t)
|
||||||
|
} else {
|
||||||
|
format!("{}_{}", agg_t, agg_o)
|
||||||
};
|
};
|
||||||
let weight_signs = vec![weights[i], signs[i]];
|
// Build the tuple (WeightValue, f64)
|
||||||
agg_t_map.insert(ticker, weight_signs);
|
let weight_sign_tuple = match &actual_weights[i] {
|
||||||
|
WeightValue::F64(val) => (WeightValue::F64(*val).clone(), signs[i]),
|
||||||
|
WeightValue::Str(vstr) => {
|
||||||
|
let new_str = format!("{}_{}", agg_t, vstr);
|
||||||
|
(WeightValue::Str(new_str), signs[i])
|
||||||
|
}
|
||||||
|
};
|
||||||
|
agg_t_map.insert(ticker, weight_sign_tuple);
|
||||||
}
|
}
|
||||||
weights_map.insert(agg_o.clone(), agg_t_map);
|
weights_map.insert(agg_o.clone(), agg_t_map);
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(weights_map)
|
Ok(weights_map)
|
||||||
}
|
}
|
||||||
|
/// Checks that the given slices have the expected length and that:
|
||||||
|
/// - numeric weights are non-zero,
|
||||||
|
/// - signs are non-zero.
|
||||||
fn check_weights_signs_lengths(
|
fn check_weights_signs_lengths(
|
||||||
weights_vec: Vec<f64>,
|
weights_vec: &[WeightValue],
|
||||||
signs_vec: Vec<f64>,
|
signs_vec: &[f64],
|
||||||
_agg_xcats_for_cid: bool,
|
agg_xcats_for_cid: bool,
|
||||||
agg_targ_len: usize,
|
agg_targ_len: usize,
|
||||||
) -> Result<(), Box<dyn std::error::Error>> {
|
) -> Result<(), Box<dyn std::error::Error>> {
|
||||||
// for vx, vname in ...
|
// For diagnostics, decide what to call the dimension
|
||||||
let agg_targ = match _agg_xcats_for_cid {
|
let agg_targ = if agg_xcats_for_cid { "xcats" } else { "cids" };
|
||||||
true => "xcats",
|
|
||||||
false => "cids",
|
// 1) Check numeric weights for zeroes.
|
||||||
};
|
for (i, weight) in weights_vec.iter().enumerate() {
|
||||||
for (vx, vname) in vec![
|
if let WeightValue::F64(val) = weight {
|
||||||
(weights_vec.clone(), "weights"),
|
if *val == 0.0 {
|
||||||
(signs_vec.clone(), "signs"),
|
return Err(format!("The weight at index {} is 0.0", i).into());
|
||||||
] {
|
|
||||||
for (i, v) in vx.iter().enumerate() {
|
|
||||||
if *v == 0.0 {
|
|
||||||
return Err(format!("The {} at index {} is 0.0", vname, i).into());
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if vx.len() != agg_targ_len {
|
}
|
||||||
return Err(format!(
|
// 2) Ensure the weights vector is the expected length.
|
||||||
"The length of {} ({}) does not match the length of {} ({})",
|
if weights_vec.len() != agg_targ_len {
|
||||||
vname,
|
return Err(format!(
|
||||||
vx.len(),
|
"The length of weights ({}) does not match the length of {} ({})",
|
||||||
agg_targ,
|
weights_vec.len(),
|
||||||
agg_targ_len
|
agg_targ,
|
||||||
)
|
agg_targ_len
|
||||||
.into());
|
)
|
||||||
|
.into());
|
||||||
|
}
|
||||||
|
|
||||||
|
// 3) Check signs for zero.
|
||||||
|
for (i, sign) in signs_vec.iter().enumerate() {
|
||||||
|
if *sign == 0.0 {
|
||||||
|
return Err(format!("The sign at index {} is 0.0", i).into());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
// 4) Ensure the signs vector is the expected length.
|
||||||
|
if signs_vec.len() != agg_targ_len {
|
||||||
|
return Err(format!(
|
||||||
|
"The length of signs ({}) does not match the length of {} ({})",
|
||||||
|
signs_vec.len(),
|
||||||
|
agg_targ,
|
||||||
|
agg_targ_len
|
||||||
|
)
|
||||||
|
.into());
|
||||||
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
fn rename_result_dfw_cols(
|
fn rename_result_dfw_cols(
|
||||||
@@ -393,6 +468,36 @@ fn agg_xcats_for_cid(cids: Vec<String>, xcats: Vec<String>) -> bool {
|
|||||||
xcats.len() > 1
|
xcats.len() > 1
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Represents a weight value that can be a string, (float, or integer).
|
||||||
|
#[derive(Debug, Clone, PartialEq)]
|
||||||
|
pub enum WeightValue {
|
||||||
|
Str(String),
|
||||||
|
F64(f64),
|
||||||
|
}
|
||||||
|
impl From<String> for WeightValue {
|
||||||
|
fn from(s: String) -> Self {
|
||||||
|
WeightValue::Str(s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<'a> From<&'a str> for WeightValue {
|
||||||
|
fn from(s: &'a str) -> Self {
|
||||||
|
WeightValue::Str(s.to_string())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<f64> for WeightValue {
|
||||||
|
fn from(f: f64) -> Self {
|
||||||
|
WeightValue::F64(f)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<i32> for WeightValue {
|
||||||
|
fn from(i: i32) -> Self {
|
||||||
|
WeightValue::F64(i as f64)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// Weighted linear combinations of cross sections or categories
|
/// Weighted linear combinations of cross sections or categories
|
||||||
/// # Arguments
|
/// # Arguments
|
||||||
/// * `df` - QDF DataFrame
|
/// * `df` - QDF DataFrame
|
||||||
@@ -417,7 +522,7 @@ pub fn linear_composite(
|
|||||||
cids: Vec<String>,
|
cids: Vec<String>,
|
||||||
weights: Option<Vec<f64>>,
|
weights: Option<Vec<f64>>,
|
||||||
signs: Option<Vec<f64>>,
|
signs: Option<Vec<f64>>,
|
||||||
weight_xcats: Option<Vec<String>>,
|
weight_xcat: Option<String>,
|
||||||
normalize_weights: bool,
|
normalize_weights: bool,
|
||||||
start: Option<String>,
|
start: Option<String>,
|
||||||
end: Option<String>,
|
end: Option<String>,
|
||||||
@@ -429,10 +534,28 @@ pub fn linear_composite(
|
|||||||
) -> Result<DataFrame, Box<dyn std::error::Error>> {
|
) -> Result<DataFrame, Box<dyn std::error::Error>> {
|
||||||
// Check if the DataFrame is a Quantamental DataFrame
|
// Check if the DataFrame is a Quantamental DataFrame
|
||||||
check_quantamental_dataframe(df)?;
|
check_quantamental_dataframe(df)?;
|
||||||
|
|
||||||
|
if agg_xcats_for_cid(cids.clone(), xcats.clone()) {
|
||||||
|
if weight_xcat.is_some() {
|
||||||
|
return Err(
|
||||||
|
format!(
|
||||||
|
"Using xcats as weights is not supported when aggregating cids for a single xcat. {:?} {:?}",
|
||||||
|
cids, xcats
|
||||||
|
)
|
||||||
|
.into(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut rxcats = xcats.clone();
|
||||||
|
if weight_xcat.is_some() {
|
||||||
|
rxcats.extend(vec![weight_xcat.clone().unwrap()]);
|
||||||
|
}
|
||||||
|
|
||||||
let rdf = reduce_dataframe(
|
let rdf = reduce_dataframe(
|
||||||
df.clone(),
|
df.clone(),
|
||||||
Some(cids.clone()),
|
Some(cids.clone()),
|
||||||
Some(xcats.clone()),
|
Some(rxcats.clone()),
|
||||||
Some(vec!["value".to_string()]),
|
Some(vec!["value".to_string()]),
|
||||||
start.clone(),
|
start.clone(),
|
||||||
end.clone(),
|
end.clone(),
|
||||||
@@ -443,10 +566,11 @@ pub fn linear_composite(
|
|||||||
let new_xcat = new_xcat.unwrap_or_else(|| "COMPOSITE".to_string());
|
let new_xcat = new_xcat.unwrap_or_else(|| "COMPOSITE".to_string());
|
||||||
let new_cid = new_cid.unwrap_or_else(|| "GLB".to_string());
|
let new_cid = new_cid.unwrap_or_else(|| "GLB".to_string());
|
||||||
|
|
||||||
let dfw = pivot_dataframe_by_ticker(rdf.clone(), Some("value".to_string())).unwrap();
|
let dfw = pivot_dataframe_by_ticker(rdf, Some("value".to_string())).unwrap();
|
||||||
|
|
||||||
let mul_targets = get_mul_targets(cids.clone(), xcats.clone())?;
|
let mul_targets = get_mul_targets(cids.clone(), xcats.clone())?;
|
||||||
let weights_map = form_weights_and_signs_map(cids.clone(), xcats.clone(), weights, signs)?;
|
let weights_map =
|
||||||
|
form_weights_and_signs_map(cids.clone(), xcats.clone(), weights, weight_xcat, signs)?;
|
||||||
|
|
||||||
for (ticker, targets) in mul_targets.iter() {
|
for (ticker, targets) in mul_targets.iter() {
|
||||||
println!("ticker: {}, targets: {:?}", ticker, targets);
|
println!("ticker: {}, targets: {:?}", ticker, targets);
|
||||||
|
|||||||
365
src/utils/bdates.rs
Normal file
365
src/utils/bdates.rs
Normal file
@@ -0,0 +1,365 @@
|
|||||||
|
use chrono::{Datelike, Duration, NaiveDate, Weekday};
|
||||||
|
use std::error::Error;
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Copy)]
|
||||||
|
pub enum BDateFreq {
|
||||||
|
Daily,
|
||||||
|
WeeklyMonday,
|
||||||
|
MonthStart,
|
||||||
|
QuarterStart,
|
||||||
|
YearStart,
|
||||||
|
MonthEnd,
|
||||||
|
QuarterEnd,
|
||||||
|
WeeklyFriday,
|
||||||
|
YearEnd,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Copy)]
|
||||||
|
pub enum AggregationType {
|
||||||
|
Start, // Indicates picking the first date in a group.
|
||||||
|
End, // Indicates picking the last date in a group.
|
||||||
|
}
|
||||||
|
|
||||||
|
impl BDateFreq {
|
||||||
|
pub fn from_string(freq: String) -> Result<Self, Box<dyn Error>> {
|
||||||
|
// use from_str to convert the string to a BDateFreq enum
|
||||||
|
Self::from_str(&freq)
|
||||||
|
}
|
||||||
|
pub fn from_str(freq: &str) -> Result<Self, Box<dyn Error>> {
|
||||||
|
match freq {
|
||||||
|
"D" => Ok(BDateFreq::Daily),
|
||||||
|
"W" => Ok(BDateFreq::WeeklyMonday),
|
||||||
|
"M" => Ok(BDateFreq::MonthStart),
|
||||||
|
"Q" => Ok(BDateFreq::QuarterStart),
|
||||||
|
"A" => Ok(BDateFreq::YearStart),
|
||||||
|
"ME" => Ok(BDateFreq::MonthEnd),
|
||||||
|
"QE" => Ok(BDateFreq::QuarterEnd),
|
||||||
|
"WF" => Ok(BDateFreq::WeeklyFriday),
|
||||||
|
"YE" => Ok(BDateFreq::YearEnd),
|
||||||
|
_ => Err("Invalid frequency specified".into()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pub fn agg_type(&self) -> AggregationType {
|
||||||
|
match self {
|
||||||
|
BDateFreq::Daily
|
||||||
|
| BDateFreq::WeeklyMonday
|
||||||
|
| BDateFreq::MonthStart
|
||||||
|
| BDateFreq::QuarterStart
|
||||||
|
| BDateFreq::YearStart => AggregationType::Start,
|
||||||
|
|
||||||
|
BDateFreq::WeeklyFriday
|
||||||
|
| BDateFreq::MonthEnd
|
||||||
|
| BDateFreq::QuarterEnd
|
||||||
|
| BDateFreq::YearEnd => AggregationType::End,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns only the business dates (Mon-Fri) between start_date and end_date
|
||||||
|
/// that match the desired frequency.
|
||||||
|
pub fn get_bdates_list_with_freq(
|
||||||
|
start_date_str: &str,
|
||||||
|
end_date_str: &str,
|
||||||
|
freq: BDateFreq,
|
||||||
|
) -> Result<Vec<NaiveDate>, Box<dyn Error>> {
|
||||||
|
let start_date = NaiveDate::parse_from_str(start_date_str, "%Y-%m-%d")?;
|
||||||
|
let end_date = NaiveDate::parse_from_str(end_date_str, "%Y-%m-%d")?;
|
||||||
|
|
||||||
|
if start_date > end_date {
|
||||||
|
return Ok(Vec::new());
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut dates = match freq {
|
||||||
|
BDateFreq::Daily => collect_daily(start_date, end_date),
|
||||||
|
|
||||||
|
BDateFreq::WeeklyMonday => collect_weekly(start_date, end_date, Weekday::Mon),
|
||||||
|
BDateFreq::WeeklyFriday => collect_weekly(start_date, end_date, Weekday::Fri),
|
||||||
|
|
||||||
|
BDateFreq::MonthStart => collect_monthly(start_date, end_date, /*start=*/ true),
|
||||||
|
BDateFreq::MonthEnd => collect_monthly(start_date, end_date, /*start=*/ false),
|
||||||
|
|
||||||
|
BDateFreq::QuarterStart => collect_quarterly(start_date, end_date, /*start=*/ true),
|
||||||
|
BDateFreq::QuarterEnd => collect_quarterly(start_date, end_date, /*start=*/ false),
|
||||||
|
|
||||||
|
BDateFreq::YearStart => collect_yearly(start_date, end_date, /*start=*/ true),
|
||||||
|
BDateFreq::YearEnd => collect_yearly(start_date, end_date, /*start=*/ false),
|
||||||
|
};
|
||||||
|
|
||||||
|
// Filter out any weekend days that might slip in edge cases (e.g. if the
|
||||||
|
// computed "start of month" fell on Sat/Sun).
|
||||||
|
dates.retain(|d| d.weekday() != Weekday::Sat && d.weekday() != Weekday::Sun);
|
||||||
|
|
||||||
|
Ok(dates)
|
||||||
|
}
|
||||||
|
|
||||||
|
/* ------------------------------ Helpers ------------------------------ */
|
||||||
|
|
||||||
|
/// Return all business days, day-by-day.
|
||||||
|
fn collect_daily(start_date: NaiveDate, end_date: NaiveDate) -> Vec<NaiveDate> {
|
||||||
|
let mut result = Vec::new();
|
||||||
|
let mut current = start_date;
|
||||||
|
while current <= end_date {
|
||||||
|
if is_weekday(current) {
|
||||||
|
result.push(current);
|
||||||
|
}
|
||||||
|
current = current.succ_opt().unwrap();
|
||||||
|
}
|
||||||
|
result
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Return the specified weekday (e.g. Monday, Friday) in each week of the range.
|
||||||
|
fn collect_weekly(
|
||||||
|
start_date: NaiveDate,
|
||||||
|
end_date: NaiveDate,
|
||||||
|
target_weekday: Weekday,
|
||||||
|
) -> Vec<NaiveDate> {
|
||||||
|
let mut result = Vec::new();
|
||||||
|
|
||||||
|
// Find the first `target_weekday` on or after `start_date`.
|
||||||
|
// If `start_date` is already e.g. Monday, we can use it as is.
|
||||||
|
// Otherwise, jump ahead until we get that weekday.
|
||||||
|
let mut current = move_to_weekday_on_or_after(start_date, target_weekday);
|
||||||
|
|
||||||
|
// Step in 7-day increments (full weeks).
|
||||||
|
while current <= end_date {
|
||||||
|
result.push(current);
|
||||||
|
current = current + Duration::days(7);
|
||||||
|
}
|
||||||
|
result
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Return either first or last business day in each month of the range.
|
||||||
|
fn collect_monthly(
|
||||||
|
start_date: NaiveDate,
|
||||||
|
end_date: NaiveDate,
|
||||||
|
want_first_day: bool,
|
||||||
|
) -> Vec<NaiveDate> {
|
||||||
|
let mut result = Vec::new();
|
||||||
|
|
||||||
|
// We'll iterate month by month, from (start_year, start_month) up to
|
||||||
|
// (end_year, end_month).
|
||||||
|
let mut year = start_date.year();
|
||||||
|
let mut month = start_date.month();
|
||||||
|
|
||||||
|
// A small helper that updates year/month by +1 month.
|
||||||
|
let next_month = |(yr, mo): (i32, u32)| -> (i32, u32) {
|
||||||
|
if mo == 12 {
|
||||||
|
(yr + 1, 1)
|
||||||
|
} else {
|
||||||
|
(yr, mo + 1)
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Move `(year, month)` backward if necessary so that `(year, month)`
|
||||||
|
// definitely covers the entire period from `start_date` onward.
|
||||||
|
// Actually, it’s simpler to start from the actual (year, month) of start_date
|
||||||
|
// and go up. We'll just skip if the computed "day" < start_date.
|
||||||
|
|
||||||
|
// Continue while we haven't passed (end_year, end_month).
|
||||||
|
while year < end_date.year() || (year == end_date.year() && month <= end_date.month()) {
|
||||||
|
// Compute the date that represents either first or last business day
|
||||||
|
// for this (year, month).
|
||||||
|
let candidate = if want_first_day {
|
||||||
|
first_business_day_of_month(year, month)
|
||||||
|
} else {
|
||||||
|
last_business_day_of_month(year, month)
|
||||||
|
};
|
||||||
|
if candidate >= start_date && candidate <= end_date {
|
||||||
|
result.push(candidate);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move to the next month.
|
||||||
|
let (ny, nm) = next_month((year, month));
|
||||||
|
year = ny;
|
||||||
|
month = nm;
|
||||||
|
}
|
||||||
|
|
||||||
|
result
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Return either the first or last business day in each quarter of the range.
|
||||||
|
fn collect_quarterly(
|
||||||
|
start_date: NaiveDate,
|
||||||
|
end_date: NaiveDate,
|
||||||
|
want_first_day: bool,
|
||||||
|
) -> Vec<NaiveDate> {
|
||||||
|
let mut result = Vec::new();
|
||||||
|
|
||||||
|
// We'll figure out which quarter `start_date` is in, then jump quarter-by-quarter.
|
||||||
|
// Quarters are: Q1 = months 1–3, Q2 = 4–6, Q3 = 7–9, Q4 = 10–12.
|
||||||
|
// Start by computing the (year, quarter_index) for start_date.
|
||||||
|
let mut year = start_date.year();
|
||||||
|
let mut q = month_to_quarter(start_date.month());
|
||||||
|
|
||||||
|
while quarter_to_first_date(year, q) > end_date {
|
||||||
|
// If even the earliest day in that quarter is > end_date, we’re done.
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move backward if the quarter’s last day < start_date, etc.
|
||||||
|
// But simpler: we’ll do a loop that increments quarter by quarter, and
|
||||||
|
// pick the appropriate date each time. We break when we pass end_date.
|
||||||
|
|
||||||
|
loop {
|
||||||
|
// For the current year+quarter, compute the date that’s either the first or last
|
||||||
|
// business day of that quarter:
|
||||||
|
let candidate = if want_first_day {
|
||||||
|
first_business_day_of_quarter(year, q)
|
||||||
|
} else {
|
||||||
|
last_business_day_of_quarter(year, q)
|
||||||
|
};
|
||||||
|
|
||||||
|
if candidate > end_date {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
if candidate >= start_date {
|
||||||
|
result.push(candidate);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move to next quarter.
|
||||||
|
if q == 4 {
|
||||||
|
year += 1;
|
||||||
|
q = 1;
|
||||||
|
} else {
|
||||||
|
q += 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
result
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Return either the first or last business day in each year of the range.
|
||||||
|
fn collect_yearly(
|
||||||
|
start_date: NaiveDate,
|
||||||
|
end_date: NaiveDate,
|
||||||
|
want_first_day: bool,
|
||||||
|
) -> Vec<NaiveDate> {
|
||||||
|
let mut result = Vec::new();
|
||||||
|
let mut year = start_date.year();
|
||||||
|
|
||||||
|
// Step year-by-year from `start_date.year()` up to `end_date.year()`.
|
||||||
|
while year <= end_date.year() {
|
||||||
|
let candidate = if want_first_day {
|
||||||
|
first_business_day_of_year(year)
|
||||||
|
} else {
|
||||||
|
last_business_day_of_year(year)
|
||||||
|
};
|
||||||
|
if candidate >= start_date && candidate <= end_date {
|
||||||
|
result.push(candidate);
|
||||||
|
}
|
||||||
|
year += 1;
|
||||||
|
}
|
||||||
|
result
|
||||||
|
}
|
||||||
|
|
||||||
|
/* ---------------------- Low-Level Utility Functions ---------------------- */

/// Is this a weekday (Mon-Fri)?
fn is_weekday(date: NaiveDate) -> bool {
    match date.weekday() {
        Weekday::Sat | Weekday::Sun => false,
        _ => true,
    }
}

/// Given a date and a `target_weekday`, returns the first date that is the
/// `target_weekday` on or after the given date.
fn move_to_weekday_on_or_after(date: NaiveDate, target: Weekday) -> NaiveDate {
    let mut current = date;
    while current.weekday() != target {
        current = current.succ_opt().unwrap();
    }
    current
}

/// Return the earliest business day of (year, month).
fn first_business_day_of_month(year: i32, month: u32) -> NaiveDate {
    // Start with the 1st of the month.
    let mut d = NaiveDate::from_ymd_opt(year, month, 1).expect("invalid year-month");
    // If it's Sat/Sun, move forward until we reach a weekday.
    while !is_weekday(d) {
        d = d.succ_opt().unwrap();
    }
    d
}

/// Return the latest business day of (year, month).
fn last_business_day_of_month(year: i32, month: u32) -> NaiveDate {
    let last_dom = days_in_month(year, month);
    let mut d = NaiveDate::from_ymd_opt(year, month, last_dom).expect("invalid year-month");
    // If it's Sat/Sun, move backward until we reach a weekday.
    while !is_weekday(d) {
        d = d.pred_opt().unwrap();
    }
    d
}

/// Number of days in a month (not considering leap years *beyond* chrono's normal handling).
fn days_in_month(year: i32, month: u32) -> u32 {
    // Chrono can handle this with a small trick:
    // construct the 1st of the next month, then subtract one day.
    // if month == 12 => next = (year+1, 1, 1)
    // else           => next = (year, month+1, 1)
    let (ny, nm) = if month == 12 {
        (year + 1, 1)
    } else {
        (year, month + 1)
    };
    let first_of_next = NaiveDate::from_ymd_opt(ny, nm, 1).unwrap();
    let last_of_this = first_of_next.pred_opt().unwrap();
    last_of_this.day()
}

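The next-month-minus-one-day trick means leap years fall out of chrono for free; a quick test-style sketch:

```rust
#[test]
fn month_lengths() {
    assert_eq!(days_in_month(2024, 2), 29); // leap year
    assert_eq!(days_in_month(2023, 2), 28);
    assert_eq!(days_in_month(2024, 12), 31); // exercises the year-rollover branch
}
```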
/// Convert a month (1..=12) to a quarter (1..=4).
fn month_to_quarter(m: u32) -> u32 {
    (m - 1) / 3 + 1
}

/// Returns the 1st day of a given (year, quarter).
fn quarter_to_first_date(year: i32, quarter: u32) -> NaiveDate {
    let month = match quarter {
        1 => 1,
        2 => 4,
        3 => 7,
        4 => 10,
        _ => panic!("invalid quarter"),
    };
    NaiveDate::from_ymd_opt(year, month, 1).unwrap()
}

/// Return the earliest business day in (year, quarter).
fn first_business_day_of_quarter(year: i32, quarter: u32) -> NaiveDate {
    let mut d = quarter_to_first_date(year, quarter);
    while !is_weekday(d) {
        d = d.succ_opt().unwrap();
    }
    d
}

/// Return the last business day in (year, quarter).
fn last_business_day_of_quarter(year: i32, quarter: u32) -> NaiveDate {
    // The last month in the quarter is the quarter's first month + 2;
    // then take the last business day of that month.
    let start = quarter_to_first_date(year, quarter);
    let last_month = start.month() + 2; // e.g. Q1 => month=1 => +2=3 => March
    last_business_day_of_month(year, last_month)
}

/// Returns Jan 1st of a given year (adjusted forward if it falls on a weekend).
fn first_business_day_of_year(year: i32) -> NaiveDate {
    let mut d = NaiveDate::from_ymd_opt(year, 1, 1).unwrap();
    while !is_weekday(d) {
        d = d.succ_opt().unwrap();
    }
    d
}

/// Returns Dec 31st of a given year (adjusted backward if it falls on a weekend).
fn last_business_day_of_year(year: i32) -> NaiveDate {
    let mut d = NaiveDate::from_ymd_opt(year, 12, 31).unwrap();
    while !is_weekday(d) {
        d = d.pred_opt().unwrap();
    }
    d
}
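The weekend adjustment composes consistently across the month, quarter, and year helpers. A test-style sketch (same module assumed):

```rust
#[test]
fn weekend_adjustment_is_consistent() {
    // 2023-01-01 is a Sunday, so every "first business day" helper
    // lands on Monday 2023-01-02.
    let monday = NaiveDate::from_ymd_opt(2023, 1, 2).unwrap();
    assert_eq!(first_business_day_of_year(2023), monday);
    assert_eq!(first_business_day_of_quarter(2023, 1), monday);
    assert_eq!(first_business_day_of_month(2023, 1), monday);
    let sunday = monday.pred_opt().unwrap();
    assert_eq!(move_to_weekday_on_or_after(sunday, Weekday::Mon), monday);
}
```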
@@ -1,3 +1,5 @@
+use crate::utils::bdates;
+use crate::utils::bdates::BDateFreq;
 use chrono::NaiveDate;
 use chrono::{Datelike, Weekday};
 use polars::prelude::*;
@@ -57,57 +59,6 @@ pub fn get_bdates_list(
     Ok(business_days)
 }
 
-#[derive(Debug, Clone, Copy)]
-pub enum BDateFreq {
-    Daily,
-    WeeklyMonday,
-    MonthStart,
-    QuarterStart,
-    YearStart,
-    MonthEnd,
-    QuarterEnd,
-    WeeklyFriday,
-    YearEnd,
-}
-
-impl BDateFreq {
-    pub fn from_str(freq: &str) -> Result<Self, Box<dyn Error>> {
-        match freq {
-            "D" => Ok(BDateFreq::Daily),
-            "W" => Ok(BDateFreq::WeeklyMonday),
-            "M" => Ok(BDateFreq::MonthStart),
-            "Q" => Ok(BDateFreq::QuarterStart),
-            "A" => Ok(BDateFreq::YearStart),
-            "ME" => Ok(BDateFreq::MonthEnd),
-            "QE" => Ok(BDateFreq::QuarterEnd),
-            "WF" => Ok(BDateFreq::WeeklyFriday),
-            "YE" => Ok(BDateFreq::YearEnd),
-            _ => Err("Invalid frequency specified".into()),
-        }
-    }
-
-    pub fn agg_type(&self) -> AggregationType {
-        match self {
-            BDateFreq::Daily
-            | BDateFreq::WeeklyMonday
-            | BDateFreq::MonthStart
-            | BDateFreq::QuarterStart
-            | BDateFreq::YearStart => AggregationType::Start,
-            BDateFreq::WeeklyFriday
-            | BDateFreq::MonthEnd
-            | BDateFreq::QuarterEnd
-            | BDateFreq::YearEnd => AggregationType::End,
-        }
-    }
-}
-
-#[derive(Debug, Clone, Copy)]
-pub enum AggregationType {
-    Start, // Indicates picking the first date in a group.
-    End,   // Indicates picking the last date in a group.
-}
-
-// Map a BDateFreq to an AggregationType.
 fn compute_group_key(d: NaiveDate, freq: BDateFreq) -> String {
     match freq {
         // For Daily, each date is its own group.
@@ -130,19 +81,32 @@ fn compute_group_key(d: NaiveDate, freq: BDateFreq) -> String {
         BDateFreq::YearStart | BDateFreq::YearEnd => format!("{}", d.year()),
     }
 }
 
-pub fn get_bdates_series_default(
+pub fn get_bdates_series_default_opt(
     start_date: String,
     end_date: String,
     freq: Option<String>,
 ) -> Result<Series, Box<dyn Error>> {
     let freq = freq.unwrap_or_else(|| "D".to_string());
     let freq = BDateFreq::from_str(&freq)?;
-    get_bdates_series(start_date, end_date, freq)
+    let series = Series::new(
+        "bdates".into(),
+        bdates::get_bdates_list_with_freq(&start_date, &end_date, freq)?,
+    );
+    Ok(series)
+}
+
+pub fn get_bdates_series_default_pl(
+    start_date: String,
+    end_date: String,
+    freq: Option<String>,
+) -> Result<Series, Box<dyn Error>> {
+    let freq = freq.unwrap_or_else(|| "D".to_string());
+    let freq = BDateFreq::from_str(&freq)?;
+    get_bdates_series_pl(start_date, end_date, freq)
 }
 
 /// Get the business dates between two dates as a Series.
-pub fn get_bdates_series(
+pub fn get_bdates_series_pl(
     start_date: String,
     end_date: String,
     freq: BDateFreq,
@@ -159,8 +123,8 @@ pub fn get_bdates_series(
     ])?;
     let gb = df.lazy().group_by(["group"]);
    let aggx = match freq.agg_type() {
-        AggregationType::Start => gb.agg([col("bdates").first()]),
-        AggregationType::End => gb.agg([col("bdates").last()]),
+        bdates::AggregationType::Start => gb.agg([col("bdates").first()]),
+        bdates::AggregationType::End => gb.agg([col("bdates").last()]),
     };
     let result = aggx.collect()?;
     let result = result
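From Rust, the new `_opt` variant can be exercised directly; a sketch (the `msyrs::utils::dateutils` module path is assumed from the `mod.rs` change below):

```rust
use msyrs::utils::dateutils::get_bdates_series_default_opt;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // `None` falls back to the daily frequency ("D").
    let s = get_bdates_series_default_opt(
        "2023-01-01".to_string(),
        "2023-01-31".to_string(),
        None,
    )?;
    assert_eq!(s.len(), 22); // 22 business days in January 2023
    Ok(())
}
```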
@@ -1,3 +1,4 @@
-pub mod qdf;
+pub mod bdates;
+pub mod dateutils;
 pub mod misc;
-pub mod dateutils;
+pub mod qdf;
373
src/utils/qdf/blacklist.rs
Normal file
@@ -0,0 +1,373 @@
use crate::utils::bdates::{get_bdates_list_with_freq, BDateFreq};
use crate::utils::dateutils::get_min_max_real_dates;
use crate::utils::misc::get_cid;
use crate::utils::qdf::core::check_quantamental_dataframe;
use chrono::NaiveDate;
use polars::prelude::*;
use std::collections::{BTreeMap, HashMap};
use std::error::Error;

use crate::utils::qdf::get_unique_metrics;

/// `Blacklist` is a thin wrapper around a `BTreeMap` of named,
/// inclusive date ranges.
#[derive(Debug, Clone)]
pub struct Blacklist {
    pub blacklist: BTreeMap<String, (String, String)>,
}

impl Blacklist {
    /// Consume the wrapper and return the ranges as a plain `HashMap`.
    pub fn into_hashmap(self) -> HashMap<String, (String, String)> {
        self.blacklist.into_iter().collect()
    }
}

/// Apply a blacklist to a Quantamental DataFrame.
///
/// * `blacklist` is a map from any "ticker-like" key to a tuple of
///   `(start_date, end_date)` in **inclusive** `"YYYY-MM-DD"` format.
/// * `metrics` – if `None`, every metric from `get_unique_metrics(df)` is used.
/// * `group_by_cid = Some(false)` is not implemented yet.
pub fn apply_blacklist(
    df: &mut DataFrame,
    blacklist: &BTreeMap<String, (String, String)>,
    metrics: Option<Vec<String>>,
    group_by_cid: Option<bool>,
) -> Result<DataFrame, Box<dyn std::error::Error>> {
    check_quantamental_dataframe(df)?;
    // The dataframe looks like:
    // | cid | xcat | real_date  | metric1 | metric2 |
    // |-----|------|------------|---------|---------|
    // | A   | B    | 2023-01-01 | 1.0     | 2.0     |
    // | A   | B    | 2023-01-02 | 1.0     | 2.0     |
    // | A   | C    | 2023-01-01 | 1.0     | 2.0     |
    // | A   | C    | 2023-01-02 | 1.0     | 2.0     |
    // | D   | E    | 2023-01-01 | 1.0     | 2.0     |
    // | D   | E    | 2023-01-02 | 1.0     | 2.0     |
    // (`real_date` is a NaiveDate column.)
    //
    // The blacklist looks like:
    // {'A_B_1': ('2023-01-02', '2023-01-03'),
    //  'A_B_2': ('2023-01-04', '2023-01-05'),
    //  'A_C_1': ('2023-01-02', '2023-01-03')}
    //
    // get_cid('A_B_1') = 'A'
    // get_cid('A_B_2') = 'A'
    // get_cid('D_E_1') = 'D'

    // NOTE: the filtering itself is not implemented yet; the DataFrame is
    // returned unchanged.
    Ok(df.clone())
}

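Since the body above is still a stub, here is a hypothetical sketch (not part of the commit) of the filtering the comments describe: each blacklist key is resolved to a `cid` via `get_cid`, and rows whose `cid` matches and whose `real_date` falls inside the inclusive range are dropped. `"YYYY-MM-DD"` strings compare correctly in lexicographic order, so string comparison is safe here.

```rust
// Hypothetical sketch, not the commit's implementation.
fn apply_blacklist_sketch(
    df: DataFrame,
    blacklist: &BTreeMap<String, (String, String)>,
) -> Result<DataFrame, Box<dyn Error>> {
    let mut lf = df.lazy();
    for (key, (start, end)) in blacklist {
        let cid = get_cid(key.clone())?; // e.g. "A_B_1" -> "A"
        let date_str = || col("real_date").dt().strftime("%Y-%m-%d");
        let in_range = date_str()
            .gt_eq(lit(start.as_str()))
            .and(date_str().lt_eq(lit(end.as_str())));
        // Keep a row unless it matches this cid AND lies inside the range.
        lf = lf.filter(col("cid").eq(lit(cid)).and(in_range).not());
    }
    lf.collect().map_err(Into::into)
}
```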
/// Create a blacklist from a Quantamental DataFrame.
/// The blacklist is a mapping of tickers to date ranges where the specified
/// metrics are null or NaN.
///
/// # Arguments
/// * `df` - The Quantamental DataFrame.
/// * `group_by_cid` - If true, group the blacklist by `cid`. Defaults to true.
/// * `blacklist_name` - The name of the blacklist. Defaults to "BLACKLIST".
/// * `metrics` - The metrics to check for null or NaN values. If None, all metrics are used.
pub fn create_blacklist_from_qdf(
    df: &DataFrame,
    group_by_cid: Option<bool>,
    blacklist_name: Option<String>,
    metrics: Option<Vec<String>>,
) -> Result<BTreeMap<String, (String, String)>, Box<dyn Error>> {
    check_quantamental_dataframe(df)?;
    let metrics = metrics.unwrap_or_else(|| get_unique_metrics(df).unwrap());
    let blacklist_name = blacklist_name.unwrap_or_else(|| "BLACKLIST".into());
    let group_by_cid = group_by_cid.unwrap_or(true);

    let (min_date, max_date) = get_min_max_real_dates(df, "real_date".into())?;
    let min_date_str = min_date.format("%Y-%m-%d").to_string();
    let max_date_str = max_date.format("%Y-%m-%d").to_string();
    let all_bdates = get_bdates_list_with_freq(
        min_date_str.as_str(),
        max_date_str.as_str(),
        BDateFreq::Daily,
    )?;

    // If none of the metrics contain a null or NaN, return an empty blacklist.
    // (Checking both here: the original early exit only looked at nulls,
    // which would wrongly skip frames whose only gaps are NaNs.)
    if !metrics.iter().any(|metric| {
        df.column(metric)
            .map(|col| {
                col.is_null().any() || col.is_nan().map(|m| m.any()).unwrap_or(false)
            })
            .unwrap_or(false)
    }) {
        return Ok(BTreeMap::new());
    }

    let df = df
        .clone()
        .lazy()
        .with_columns([
            (cols(metrics.clone()).is_null().or(cols(metrics).is_nan())).alias("null_mask")
        ])
        .filter(col("null_mask"))
        .sort(
            ["cid", "xcat"],
            SortMultipleOptions::default().with_maintain_order(true),
        )
        .group_by([col("cid"), col("xcat")])
        .agg([col("real_date")
            .dt()
            .strftime("%Y-%m-%d")
            .sort(SortOptions::default())])
        .select([
            concat_str([col("cid"), col("xcat")], "_", true).alias("ticker"),
            col("real_date").alias("real_dates"),
        ])
        .collect()?;

    let ticker_vec = df
        .column("ticker")?
        .str()?
        .into_iter()
        .filter_map(|opt| opt.map(|s| s.to_string()))
        .collect::<Vec<String>>();

    let rdt = get_vec_of_vec_of_dates_from_df(df)?;

    let mut blk: HashMap<String, Vec<String>> = HashMap::new();
    for (tkr, dates) in ticker_vec.iter().zip(rdt.iter()) {
        if group_by_cid {
            let _cid = get_cid(tkr.clone())?;
            if blk.contains_key(&_cid) {
                blk.get_mut(&_cid).unwrap().extend(dates.iter().cloned());
            } else {
                blk.insert(_cid, dates.clone());
            }
        } else {
            blk.insert(tkr.to_string(), dates.clone());
        }
    }
    for (_key, vals) in blk.iter_mut() {
        // Order is important - dedup only removes consecutive duplicates.
        vals.sort();
        vals.dedup();
    }

    let all_bdates_strs = all_bdates
        .iter()
        .map(|date| date.format("%Y-%m-%d").to_string())
        .collect::<Vec<String>>();

    let mut blacklist: HashMap<String, (String, String)> = HashMap::new();
    for (tkr, dates) in blk.iter() {
        let date_ranges = convert_dates_list_to_date_ranges(dates.clone(), all_bdates_strs.clone());
        for (rng_idx, (start_date, end_date)) in date_ranges.iter() {
            let range_key = format!("{}_{}_{}", tkr, blacklist_name, rng_idx);
            blacklist.insert(range_key, (start_date.clone(), end_date.clone()));
        }
    }

    let mut btree_map: BTreeMap<String, (String, String)> = BTreeMap::new();
    for (key, (start_date, end_date)) in blacklist.iter() {
        btree_map.insert(key.clone(), (start_date.clone(), end_date.clone()));
    }

    Ok(btree_map)
}

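A usage sketch (hypothetical data; assumes a well-formed Quantamental DataFrame `df` with null/NaN gaps in an `AUD` ticker):

```rust
// With group_by_cid = true and the default name, keys come back as
// zero-padded, cid-level ranges, e.g.
// {"AUD_BLACKLIST_0": ("2023-01-02", "2023-01-03"),
//  "AUD_BLACKLIST_1": ("2023-01-05", "2023-01-05")}
let blk = create_blacklist_from_qdf(&df, Some(true), None, None)?;
for (name, (start, end)) in &blk {
    println!("{name}: {start} -> {end}");
}
```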
/// Get a combined null/NaN mask for the specified metrics in the DataFrame.
#[allow(dead_code)]
fn get_nan_mask(
    df: &DataFrame,
    metrics: Vec<String>,
) -> Result<ChunkedArray<BooleanType>, Box<dyn Error>> {
    let null_masks: Vec<ChunkedArray<BooleanType>> = metrics
        .iter()
        .map(|metric| {
            let null_mask = df.column(metric.as_str())?.is_null();
            let nan_mask = df.column(metric.as_str())?.is_nan()?;
            Ok(null_mask | nan_mask)
        })
        .collect::<Result<_, Box<dyn Error>>>()?;
    // Element-wise OR across the per-metric masks: a row is flagged if it is
    // null or NaN in any metric.
    let null_mask = null_masks
        .into_iter()
        .reduce(|acc, mask| acc | mask)
        .unwrap_or_else(|| BooleanChunked::full_null("null_mask".into(), df.height()));
    Ok(null_mask)
}

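The `reduce` is just an element-wise OR across the per-metric masks; a standalone sketch of the idea:

```rust
use polars::prelude::*;

fn main() {
    let a = BooleanChunked::from_slice("a".into(), &[true, false, false]);
    let b = BooleanChunked::from_slice("b".into(), &[false, false, true]);
    // A row is flagged if it is null/NaN in *any* metric.
    let combined = [a, b].into_iter().reduce(|acc, m| acc | m).unwrap();
    assert_eq!(
        combined.into_iter().collect::<Vec<_>>(),
        vec![Some(true), Some(false), Some(true)]
    );
}
```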
fn convert_dates_list_to_date_ranges(
    blacklist: Vec<String>,
    all_bdates_strs: Vec<String>,
) -> HashMap<String, (String, String)> {
    // Step 1: Map every date in all_bdates_strs to its index.
    let mut all_map: HashMap<String, usize> = HashMap::new();
    for (i, d) in all_bdates_strs.iter().enumerate() {
        all_map.insert(d.clone(), i);
    }

    // Step 2: Convert each blacklisted date into its index, if it exists.
    let mut blacklisted_indices: Vec<usize> = Vec::new();
    for dt in blacklist {
        if let Some(&idx) = all_map.get(&dt) {
            blacklisted_indices.push(idx);
        }
    }

    // Step 3: Sort the blacklisted indices.
    blacklisted_indices.sort_unstable();

    // Step 4: Traverse and group consecutive indices into ranges.
    let mut result: HashMap<i64, (String, String)> = HashMap::new();
    let mut string_result: HashMap<String, (String, String)> = HashMap::new();

    if blacklisted_indices.is_empty() {
        return string_result;
    }

    let mut range_idx: i64 = 0;
    let mut start_idx = blacklisted_indices[0];
    let mut end_idx = start_idx;

    for &cur_idx in blacklisted_indices.iter().skip(1) {
        if cur_idx == end_idx + 1 {
            // We are still in a contiguous run.
            end_idx = cur_idx;
        } else {
            // We hit a break in contiguity, so store the last range.
            result.insert(
                range_idx,
                (
                    all_bdates_strs[start_idx].clone(),
                    all_bdates_strs[end_idx].clone(),
                ),
            );
            range_idx += 1;

            // Start a new range.
            start_idx = cur_idx;
            end_idx = cur_idx;
        }
    }

    // Don't forget to store the final range after the loop.
    result.insert(
        range_idx,
        (
            all_bdates_strs[start_idx].clone(),
            all_bdates_strs[end_idx].clone(),
        ),
    );

    // Zero-pad the numeric keys so the string keys sort lexicographically.
    let max_digits = result.keys().max().unwrap_or(&-1).to_string().len();
    for (key, (start_date, end_date)) in result.iter() {
        let new_key = format!("{:0width$}", key, width = max_digits);
        string_result.insert(new_key, (start_date.clone(), end_date.clone()));
    }

    string_result
}

fn get_vec_of_vec_of_dates_from_df(df: DataFrame) -> Result<Vec<Vec<String>>, Box<dyn Error>> {
    let rdt = df
        .column("real_dates")?
        .as_series()
        .unwrap()
        .list()?
        .into_iter()
        .flatten()
        .collect::<Vec<Series>>()
        .iter()
        .map(|s| {
            s.str()
                .unwrap()
                .into_iter()
                .filter_map(|opt| opt.map(|s| s.to_string()))
                .collect::<Vec<String>>()
        })
        .collect::<Vec<Vec<String>>>();
    Ok(rdt)
}

#[allow(dead_code)]
fn get_vec_of_vec_of_naivedates_from_df(
    df: DataFrame,
) -> Result<Vec<Vec<NaiveDate>>, Box<dyn Error>> {
    let rdt = df
        .column("real_dates")?
        .as_series()
        .unwrap()
        .list()?
        .into_iter()
        .flatten()
        .collect::<Vec<Series>>()
        .iter()
        .map(|s| {
            s.date()
                .unwrap()
                .into_iter()
                .filter_map(|opt| {
                    // Polars stores dates as days since the Unix epoch
                    // (1970-01-01), while `from_num_days_from_ce_opt` counts
                    // from CE day 1, so the epoch offset must be added.
                    opt.and_then(|days| NaiveDate::from_num_days_from_ce_opt(days + 719_163))
                })
                .collect::<Vec<NaiveDate>>()
        })
        .collect::<Vec<Vec<NaiveDate>>>();
    Ok(rdt)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_convert_dates_list_to_date_ranges() {
        let all_dates = vec![
            "2023-01-01".to_string(),
            "2023-01-02".to_string(),
            "2023-01-03".to_string(),
            "2023-01-04".to_string(),
            "2023-01-05".to_string(),
            "2023-01-06".to_string(),
        ];
        let blacklist = vec![
            "2023-01-02".to_string(),
            "2023-01-03".to_string(),
            "2023-01-05".to_string(),
        ];

        let result = convert_dates_list_to_date_ranges(blacklist, all_dates);
        // Expect two ranges:
        // range 0 => ("2023-01-02", "2023-01-03")
        // range 1 => ("2023-01-05", "2023-01-05")
        assert_eq!(
            result["0"],
            ("2023-01-02".to_string(), "2023-01-03".to_string())
        );
        assert_eq!(
            result["1"],
            ("2023-01-05".to_string(), "2023-01-05".to_string())
        );
    }
}
@@ -17,14 +17,15 @@ use std::error::Error;
 pub fn check_quantamental_dataframe(df: &DataFrame) -> Result<(), Box<dyn Error>> {
     let expected_cols = ["real_date", "cid", "xcat"];
     let expected_dtype = [DataType::Date, DataType::String, DataType::String];
+    let err = "Quantamental DataFrame must have at least 4 columns: 'real_date', 'cid', 'xcat' and one or more metrics.";
     for (col, dtype) in expected_cols.iter().zip(expected_dtype.iter()) {
         let col = df.column(col);
         if col.is_err() {
-            return Err(format!("Column {:?} not found", col).into());
+            return Err(format!("{} Column {:?} not found", err, col).into());
         }
         let col = col?;
         if col.dtype() != dtype {
-            return Err(format!("Column {:?} has wrong dtype", col).into());
+            return Err(format!("{} Column {:?} has wrong dtype", err, col).into());
         }
     }
     Ok(())
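With this change a malformed frame fails with the context-rich prefix; a sketch (assuming `check_quantamental_dataframe` is re-exported from `msyrs::utils::qdf`):

```rust
use msyrs::utils::qdf::check_quantamental_dataframe;
use polars::prelude::*;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // No 'real_date'/'cid'/'xcat' columns: rejected with the full message.
    let df = df!("foo" => &["bar"])?;
    let err = check_quantamental_dataframe(&df).unwrap_err();
    assert!(err.to_string().starts_with("Quantamental DataFrame must have"));
    Ok(())
}
```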
@@ -1,11 +1,12 @@
+pub mod blacklist;
 pub mod core;
-pub mod update_df;
 pub mod load;
-pub mod reduce_df;
 pub mod pivots;
+pub mod reduce_df;
+pub mod update_df;
 
 // Re-export submodules for easier access
 pub use core::*;
-pub use update_df::*;
 pub use load::*;
 pub use reduce_df::*;
+pub use update_df::*;
@@ -30,12 +30,12 @@ pub fn reduce_dataframe(
     let df_size = df.shape();
     let mut new_df = df.clone();
 
-    let ticker_col: Column = get_ticker_column_for_quantamental_dataframe(&new_df)?;
+    let ticker_col = get_ticker_column_for_quantamental_dataframe(&new_df)?;
 
     // if cids is not provided, get all unique cids
-    let u_cids: Vec<String> = get_unique_cids(&new_df)?;
-    let u_xcats: Vec<String> = get_unique_xcats(&new_df)?;
-    let u_tickers: Vec<String> = _get_unique_strs_from_str_column_object(&ticker_col)?;
+    let u_cids = get_unique_cids(&new_df)?;
+    let u_xcats = get_unique_xcats(&new_df)?;
+    let u_tickers = _get_unique_strs_from_str_column_object(&ticker_col)?;
 
     let cids_vec = cids.unwrap_or_else(|| u_cids.clone());
     let specified_cids: Vec<&str> = cids_vec.iter().map(AsRef::as_ref).collect();