{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Environment setup for the experiment notebook.\n",
    "import os\n",
    "import argparse\n",
    "import torch  # used later by torch.save(); import up front so a fresh Restart-&-Run-All works\n",
    "import qlib\n",
    "\n",
    "# Pin the process to a single GPU before any CUDA context is created.\n",
    "os.environ[\"CUDA_VISIBLE_DEVICES\"] = '3'\n",
    "# Instrument universe used throughout the notebook (S&P 500 constituents).\n",
    "instruments = 'sp500'\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "from collections import Counter\n",
    "from alphagen.data.expression import *\n",
    "from alphagen.models.alpha_pool import AlphaPool\n",
    "from alphagen.utils.correlation import batch_pearsonr, batch_spearmanr\n",
    "from alphagen_generic.features import *\n",
    "from gan.utils.data import get_data_by_year\n",
    "\n",
    "\n",
    "def pred_pool(capacity, data, cache):\n",
    "    \"\"\"Build an alpha pool from the `capacity` most frequent cached expressions,\n",
    "    optimise the pool weights, and return the ensemble alpha values on `data`.\n",
    "\n",
    "    Relies on the global `target` (star-imported from alphagen_generic.features).\n",
    "\n",
    "    :param capacity: maximum number of expressions kept in the pool\n",
    "    :param cache: mapping/iterable of expression source strings; counted with\n",
    "        Counter, the most common `capacity` keys are evaluated into expressions\n",
    "    :return: ensemble alpha tensor produced by QLibStockDataCalculator\n",
    "    \"\"\"\n",
    "    from alphagen_qlib.calculator import QLibStockDataCalculator\n",
    "    pool = AlphaPool(capacity=capacity,\n",
    "                     stock_data=data,\n",
    "                     target=target,\n",
    "                     ic_lower_bound=None)\n",
    "    # NOTE(security): cache keys are eval'd as expression source code; only\n",
    "    # feed cache files produced by this project's own runs.\n",
    "    exprs = [eval(key) for key, _ in Counter(cache).most_common(capacity)]\n",
    "    pool.force_load_exprs(exprs)\n",
    "    pool._optimize(alpha=5e-3, lr=5e-1, n_iter=2000)\n",
    "\n",
    "    # Only the first `pool.size` slots are populated after optimisation.\n",
    "    exprs = pool.exprs[:pool.size]\n",
    "    weights = pool.weights[:pool.size]\n",
    "    calculator_test = QLibStockDataCalculator(data, target)\n",
    "    ensemble_value = calculator_test.make_ensemble_alpha(exprs, weights)\n",
    "    return ensemble_value\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Infer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch  # re-import is harmless; guarantees torch is in scope on a fresh kernel\n",
    "\n",
    "for seed in range(1):\n",
    "    for train_end in range(2016,2017):\n",
    "        for num in [1]:\n",
    "            # Directory holding the cached GP run for this (universe, year, seed).\n",
    "            save_dir = f'out_gp/{instruments}_{train_end}_day_{seed}'\n",
    "            print(save_dir)\n",
    "\n",
    "            # Rolling split: train 2010..train_end, validate train_end+1, test train_end+2.\n",
    "            returned = get_data_by_year(\n",
    "                train_start=2010, train_end=train_end, valid_year=train_end + 1, test_year=train_end + 2,\n",
    "                instruments=instruments, target=target, freq='day',\n",
    "                qlib_path='/your_path/data/qlib_data/us_data'\n",
    "            )\n",
    "            data_all, data, data_valid, data_valid_withhead, data_test, data_test_withhead, name = returned\n",
    "            cache = json.load(open(f'{save_dir}/2.json'))['cache']\n",
    "\n",
    "            # (Unused below; kept for reference to the GP terminal set.)\n",
    "            features = ['open_', 'close', 'high', 'low', 'volume', 'vwap']\n",
    "            constants = [f'Constant({v})' for v in [-30., -10., -5., -2., -1., -0.5, -0.01, 0.01, 0.5, 1., 2., 5., 10., 30.]]\n",
    "            terminals = features + constants\n",
    "\n",
    "            # Build the ensemble alpha on the full span, keep only the test tail, persist it.\n",
    "            pred = pred_pool(num, data_all, cache)\n",
    "            pred = pred[-data_test.n_days:]\n",
    "            torch.save(pred.detach().cpu(), f\"{save_dir}/pred_{num}.pt\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "from alphagen.utils.correlation import batch_pearsonr, batch_spearmanr, batch_ret, batch_sharpe_ratio, batch_max_drawdown\n",
    "import torch\n",
    "import os\n",
    "import numpy as np\n",
    "\n",
    "def chunk_batch_spearmanr(x, y, chunk_size=100):\n",
    "    \"\"\"Compute batch_spearmanr(x, y) in chunks of `chunk_size` days to bound\n",
    "    peak memory, then concatenate the per-day results along dim 0.\n",
    "    \"\"\"\n",
    "    n_days = len(x)\n",
    "    spearmanr_list= []\n",
    "    for i in range(0, n_days, chunk_size):\n",
    "        spearmanr_list.append(batch_spearmanr(x[i:i+chunk_size], y[i:i+chunk_size]))\n",
    "    spearmanr_list = torch.cat(spearmanr_list, dim=0)\n",
    "    return spearmanr_list\n",
    "\n",
    "def get_tensor_metrics(x, y, risk_free_rate=0.0):\n",
    "    \"\"\"Summarise prediction quality of `x` against target `y`.\n",
    "\n",
    "    Computes per-day Pearson IC, Spearman rank IC and a return series, then\n",
    "    aggregates each into mean / std / information-ratio, plus Sharpe and max\n",
    "    drawdown on the return series.\n",
    "\n",
    "    Returns a (metrics_dict, ret_series) pair.\n",
    "    \"\"\"\n",
    "    # Ensure tensors are 2D (days, stocks)\n",
    "    if x.dim() > 2: x = x.squeeze(-1)\n",
    "    if y.dim() > 2: y = y.squeeze(-1)\n",
    "\n",
    "    ic_s = batch_pearsonr(x, y)\n",
    "    ric_s = chunk_batch_spearmanr(x, y, chunk_size=400)\n",
    "    # 0.003 is presumably a flat transaction-cost deduction — TODO confirm.\n",
    "    ret_s = batch_ret(x, y) -0.003\n",
    "\n",
    "    # Days with undefined correlations (e.g. constant cross-sections) count as 0.\n",
    "    ic_s = torch.nan_to_num(ic_s, nan=0.)\n",
    "    ric_s = torch.nan_to_num(ric_s, nan=0.)\n",
    "    # /20 presumably rescales a multi-day (20d) return to per-day — TODO confirm.\n",
    "    ret_s = torch.nan_to_num(ret_s, nan=0.) / 20\n",
    "    ic_s_mean = ic_s.mean().item()\n",
    "    # Guard: fall back to std=1.0 when the series is (near-)constant so the\n",
    "    # IR ratios below stay finite.\n",
    "    ic_s_std = ic_s.std().item() if ic_s.std().item() > 1e-6 else 1.0\n",
    "    ric_s_mean = ric_s.mean().item()\n",
    "    ric_s_std = ric_s.std().item() if ric_s.std().item() > 1e-6 else 1.0\n",
    "    ret_s_mean = ret_s.mean().item()\n",
    "    ret_s_std = ret_s.std().item() if ret_s.std().item() > 1e-6 else 1.0\n",
    "    \n",
    "    # Calculate Sharpe Ratio and Maximum Drawdown for ret series\n",
    "    ret_sharpe = batch_sharpe_ratio(ret_s, risk_free_rate).item()\n",
    "    ret_mdd = batch_max_drawdown(ret_s).item()\n",
    "    result = dict(\n",
    "        ic=ic_s_mean,\n",
    "        ic_std=ic_s_std,\n",
    "        icir=ic_s_mean / ic_s_std,\n",
    "        ric=ric_s_mean,\n",
    "        ric_std=ric_s_std,\n",
    "        ricir=ric_s_mean / ric_s_std,\n",
    "        # mean * n_days / 3 — presumably a period-total return scaled by the\n",
    "        # window length; the /3 factor is unexplained here — TODO confirm.\n",
    "        ret=ret_s_mean * len(ret_s) / 3,\n",
    "        ret_std=ret_s_std,\n",
    "        retir=ret_s_mean / ret_s_std,\n",
    "        ret_sharpe=ret_sharpe,\n",
    "        ret_mdd=ret_mdd,\n",
    "    )\n",
    "    return result, ret_s\n",
    " "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Read and combine results for display"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "result = []\n",
    "instruments = 'sp500'\n",
    "for num in [1]:\n",
    "    for seed in range(1):\n",
    "\n",
    "        cur_seed_ic = []\n",
    "        cur_seed_ric = []\n",
    "        for train_end in range(2016,2017):\n",
    "            # '/path/to/save/results'\n",
    "            save_dir = f'out_gp/{instruments}_{train_end}_day_{seed}'\n",
    "\n",
    "            # Same rolling split as the inference cell above.\n",
    "            returned = get_data_by_year(\n",
    "                train_start=2010, train_end=train_end, valid_year=train_end + 1, test_year=train_end + 2,\n",
    "                instruments=instruments, target=target, freq='day',\n",
    "                qlib_path='/your_path/data/qlib_data/us_data_qlib'\n",
    "            )\n",
    "            data_all, data, data_valid, data_valid_withhead, data_test, data_test_withhead, name = returned\n",
    "            pred = torch.load(f\"{save_dir}/pred_{num}.pt\")\n",
    "\n",
    "            # Target values over the test window, moved to CPU to match the saved predictions.\n",
    "            tgt = target.evaluate(data_all)[-data_test.n_days:, :].to(\"cpu\")\n",
    "            # pred/tgt are already tensors: torch.tensor(tensor) triggers a\n",
    "            # copy-construct warning; detach().clone() gives the same detached copy.\n",
    "            res, ret_s = get_tensor_metrics(pred.detach().clone(), tgt.detach().clone())\n",
    "            print(pd.DataFrame(res, index=[\"Test\"]))\n",
    "            np.save(f\"{save_dir}/ret_s.npy\", ret_s)\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": ".venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
