{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Read the data, analyze it, and run the baseline."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "number of products: 139\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import xgboost as xgb\n",
    "from datetime import datetime\n",
    "import math\n",
    "from statsmodels.tsa.stattools import grangercausalitytests\n",
    "import warnings\n",
    "warnings.filterwarnings(\"ignore\")\n",
    "\n",
    "FILENAME_TESTSET='original/testSet_predict_table.csv'\n",
    "FILENAME_TRAIN_AND_VALID='original/product_info_simple_final_train.csv'\n",
    "PATH_TESTSET_PREDICTIONS='predict_tables/'\n",
    "\n",
    "# transaction_date is stored as a yyyymmdd integer; parse it explicitly.\n",
    "data_test = pd.read_csv(FILENAME_TESTSET)\n",
    "data_test['formatted_transaction_date'] = pd.to_datetime(data_test['transaction_date'].astype(str), format='%Y%m%d')\n",
    "list_product_pid = data_test['product_pid'].unique()\n",
    "print('number of products:',len(list_product_pid))\n",
    "\n",
    "data_train_and_valid = pd.read_csv(FILENAME_TRAIN_AND_VALID)\n",
    "data_train_and_valid['formatted_transaction_date'] = pd.to_datetime(data_train_and_valid['transaction_date'].astype(str), format='%Y%m%d')\n",
    "\n",
    "# Handle NaNs in total_net_value.\n",
    "if data_train_and_valid['total_net_value'].isna().all():\n",
    "    # No non-NaN value exists anywhere: fall back to 0.\n",
    "    data_train_and_valid['total_net_value'] = 0\n",
    "else:\n",
    "    # Forward-fill then backward-fill *within each product*, so fill values\n",
    "    # never leak across product boundaries (a plain column-wide ffill would\n",
    "    # carry the previous product's last value into the next product).\n",
    "    # fillna(method=...) is deprecated in pandas; use .ffill()/.bfill().\n",
    "    data_train_and_valid['total_net_value'] = (\n",
    "        data_train_and_valid.groupby('product_pid')['total_net_value']\n",
    "        .transform(lambda s: s.ffill().bfill())\n",
    "    )\n",
    "    # Products whose series is entirely NaN are still NaN after both fills.\n",
    "    data_train_and_valid['total_net_value'] = data_train_and_valid['total_net_value'].fillna(0)\n",
    "# Drop during_days: grangercausalitytests showed it does not help predict\n",
    "# any of the three target series.\n",
    "data_train_and_valid.drop('during_days',axis=1,inplace=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Add num_days_from_base: whole days elapsed since the base date 2021-01-04.\n",
    "# Subtracting pandas Timestamps gives an exact calendar-day difference and\n",
    "# avoids .timestamp(), whose epoch-seconds arithmetic depends on the local\n",
    "# timezone/DST and required an int() truncation workaround.\n",
    "date_base = pd.Timestamp('2021-01-04')\n",
    "transform_date = lambda x: (x - date_base).days\n",
    "data_train_and_valid['num_days_from_base'] = data_train_and_valid['formatted_transaction_date'].apply(transform_date)\n",
    "data_test['num_days_from_base'] = data_test['formatted_transaction_date'].apply(transform_date)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Evaluation metric: WMAPE = sum(|pred - real|) / sum(|real|).\n",
    "def wmape_score(actuals, predictions):\n",
    "    \"\"\"Weighted MAPE of predictions against actuals.\n",
    "\n",
    "    Degenerate denominator: if sum(|actuals|) == 0, return 0 when the\n",
    "    numerator is also 0, otherwise 100.\n",
    "    \"\"\"\n",
    "    sum_diff, sum_real = compute_sum_diff_and_sum_real(actuals, predictions)\n",
    "    if sum_real == 0:\n",
    "        return 0 if sum_diff == 0 else 100\n",
    "    return sum_diff / sum_real\n",
    "\n",
    "def compute_sum_diff_and_sum_real(actuals, predictions):\n",
    "    \"\"\"Return (sum of absolute errors, sum of absolute actuals) as floats.\n",
    "\n",
    "    The error sum is taken over the prediction length; the actuals sum is\n",
    "    over all actuals (matching the original accumulation semantics).\n",
    "    \"\"\"\n",
    "    actuals = list(actuals)\n",
    "    predictions = list(predictions)\n",
    "    sum_diff = float(sum(abs(p - r) for p, r in zip(predictions, actuals)))\n",
    "    sum_real = float(sum(abs(r) for r in actuals))\n",
    "    return sum_diff, sum_real"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['apply_amt',\n",
       " 'redeem_amt',\n",
       " 'net_in_amt',\n",
       " 'uv_fundown',\n",
       " 'uv_stableown',\n",
       " 'uv_fundopt',\n",
       " 'uv_fundmarket',\n",
       " 'uv_termmarket',\n",
       " 'total_net_value',\n",
       " 'num_days_from_base']"
      ]
     },
     "execution_count": 50,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Feature columns of the train/valid set: everything except identifier columns.\n",
    "excluded_cols = ['product_pid', 'transaction_date', 'formatted_transaction_date']\n",
    "col_names = [c for c in data_train_and_valid.columns if c not in excluded_cols]\n",
    "col_names"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "starting product1, the 1th product\n",
      "starting product100, the 2th product\n",
      "starting product101, the 3th product\n",
      "starting product102, the 4th product\n",
      "starting product103, the 5th product\n",
      "starting product104, the 6th product\n",
      "starting product105, the 7th product\n",
      "starting product106, the 8th product\n",
      "starting product107, the 9th product\n",
      "starting product108, the 10th product\n",
      "starting product109, the 11th product\n",
      "starting product110, the 12th product\n",
      "starting product111, the 13th product\n",
      "starting product112, the 14th product\n",
      "starting product113, the 15th product\n",
      "starting product114, the 16th product\n",
      "starting product115, the 17th product\n",
      "starting product116, the 18th product\n",
      "starting product117, the 19th product\n",
      "starting product118, the 20th product\n",
      "starting product119, the 21th product\n",
      "starting product12, the 22th product\n",
      "starting product120, the 23th product\n",
      "starting product121, the 24th product\n",
      "starting product122, the 25th product\n",
      "starting product123, the 26th product\n",
      "starting product124, the 27th product\n",
      "starting product125, the 28th product\n",
      "starting product126, the 29th product\n",
      "starting product128, the 30th product\n",
      "starting product129, the 31th product\n",
      "starting product13, the 32th product\n",
      "starting product130, the 33th product\n",
      "starting product131, the 34th product\n",
      "starting product132, the 35th product\n",
      "starting product133, the 36th product\n",
      "starting product134, the 37th product\n",
      "starting product135, the 38th product\n",
      "starting product136, the 39th product\n",
      "starting product137, the 40th product\n",
      "starting product138, the 41th product\n",
      "starting product14, the 42th product\n",
      "starting product140, the 43th product\n",
      "starting product141, the 44th product\n",
      "starting product142, the 45th product\n",
      "starting product143, the 46th product\n",
      "starting product144, the 47th product\n",
      "starting product145, the 48th product\n",
      "starting product146, the 49th product\n",
      "starting product147, the 50th product\n",
      "starting product148, the 51th product\n",
      "starting product149, the 52th product\n",
      "starting product15, the 53th product\n",
      "starting product150, the 54th product\n",
      "starting product151, the 55th product\n",
      "starting product152, the 56th product\n",
      "starting product153, the 57th product\n",
      "starting product155, the 58th product\n",
      "starting product156, the 59th product\n",
      "starting product157, the 60th product\n",
      "starting product16, the 61th product\n",
      "starting product17, the 62th product\n",
      "starting product19, the 63th product\n",
      "starting product2, the 64th product\n",
      "starting product20, the 65th product\n",
      "starting product24, the 66th product\n",
      "starting product26, the 67th product\n",
      "starting product27, the 68th product\n",
      "starting product28, the 69th product\n",
      "starting product29, the 70th product\n",
      "starting product3, the 71th product\n",
      "starting product30, the 72th product\n",
      "starting product31, the 73th product\n",
      "starting product32, the 74th product\n",
      "starting product33, the 75th product\n",
      "starting product34, the 76th product\n",
      "starting product35, the 77th product\n",
      "starting product37, the 78th product\n",
      "starting product38, the 79th product\n",
      "starting product39, the 80th product\n",
      "starting product40, the 81th product\n",
      "starting product41, the 82th product\n",
      "starting product42, the 83th product\n",
      "starting product43, the 84th product\n",
      "starting product45, the 85th product\n",
      "starting product46, the 86th product\n",
      "starting product47, the 87th product\n",
      "starting product48, the 88th product\n",
      "starting product49, the 89th product\n",
      "starting product50, the 90th product\n",
      "starting product51, the 91th product\n",
      "starting product52, the 92th product\n",
      "starting product53, the 93th product\n",
      "starting product54, the 94th product\n",
      "starting product55, the 95th product\n",
      "starting product56, the 96th product\n",
      "starting product57, the 97th product\n",
      "starting product58, the 98th product\n",
      "starting product59, the 99th product\n",
      "starting product6, the 100th product\n",
      "starting product60, the 101th product\n",
      "starting product61, the 102th product\n",
      "starting product62, the 103th product\n",
      "starting product63, the 104th product\n",
      "starting product64, the 105th product\n",
      "starting product65, the 106th product\n",
      "starting product66, the 107th product\n",
      "starting product67, the 108th product\n",
      "starting product68, the 109th product\n",
      "starting product70, the 110th product\n",
      "starting product71, the 111th product\n",
      "starting product72, the 112th product\n",
      "starting product73, the 113th product\n",
      "starting product75, the 114th product\n",
      "starting product76, the 115th product\n",
      "starting product78, the 116th product\n",
      "starting product79, the 117th product\n",
      "starting product8, the 118th product\n",
      "starting product80, the 119th product\n",
      "starting product81, the 120th product\n",
      "starting product82, the 121th product\n",
      "starting product83, the 122th product\n",
      "starting product84, the 123th product\n",
      "starting product85, the 124th product\n",
      "starting product86, the 125th product\n",
      "starting product87, the 126th product\n",
      "starting product88, the 127th product\n",
      "starting product89, the 128th product\n",
      "starting product9, the 129th product\n",
      "starting product90, the 130th product\n",
      "starting product91, the 131th product\n",
      "starting product92, the 132th product\n",
      "starting product93, the 133th product\n",
      "starting product94, the 134th product\n",
      "starting product95, the 135th product\n",
      "starting product96, the 136th product\n",
      "starting product97, the 137th product\n",
      "starting product98, the 138th product\n",
      "starting product99, the 139th product\n",
      "wmape on train set: 0.026023770408451614\n"
     ]
    }
   ],
   "source": [
    "# Train one model per (target, product) for apply_amt, redeem_amt, net_in_amt.\n",
    "models = {'apply_amt': {}, 'redeem_amt': {}, 'net_in_amt': {}}\n",
    "\n",
    "# Per-target, per-product list of exogenous series that Granger-cause the target.\n",
    "dict_helpful_features = {'apply_amt': {}, 'redeem_amt': {}, 'net_in_amt': {}}\n",
    "\n",
    "sum_diff_train = 0\n",
    "sum_real_train = 0\n",
    "\n",
    "for i, product_pid in enumerate(list_product_pid):\n",
    "    print(f'starting {product_pid}, the {i+1}th product')\n",
    "    data_this_product_train_and_valid = data_train_and_valid[data_train_and_valid['product_pid']==product_pid]\n",
    "    # No validation split: train on everything.\n",
    "    # (A last-10-rows validation split was used previously and dropped.)\n",
    "    data_this_product_train = data_this_product_train_and_valid\n",
    "\n",
    "    for col_y in ['apply_amt', 'redeem_amt', 'net_in_amt']:\n",
    "        # Select exogenous series that Granger-cause col_y at lag 1.\n",
    "        dict_helpful_features[col_y][product_pid] = []\n",
    "        # A constant target series cannot be tested; this check is invariant\n",
    "        # over col_x, so it is hoisted out of the inner loop.\n",
    "        y_is_constant = data_this_product_train[col_y].max() == data_this_product_train[col_y].min()\n",
    "        if not y_is_constant:\n",
    "            for col_x in (c for c in col_names if c != col_y):\n",
    "                if data_this_product_train[col_x].max() == data_this_product_train[col_x].min():\n",
    "                    # Constant candidate series: skip.\n",
    "                    continue\n",
    "                result = grangercausalitytests(data_this_product_train[[col_y, col_x]], maxlag=1, verbose=False)\n",
    "                p_values_lag1 = [test[1] for test in result[1][0].values()]\n",
    "                if all(p < 0.05 for p in p_values_lag1):\n",
    "                    dict_helpful_features[col_y][product_pid].append(col_x)\n",
    "\n",
    "        # Fallback so every model has at least one feature.\n",
    "        if len(dict_helpful_features[col_y][product_pid]) == 0:\n",
    "            dict_helpful_features[col_y][product_pid].append('num_days_from_base')\n",
    "        X_train = data_this_product_train[dict_helpful_features[col_y][product_pid]]\n",
    "        y_train = data_this_product_train[col_y]\n",
    "\n",
    "        model = xgb.XGBRegressor()\n",
    "        model.fit(X_train, y_train)\n",
    "        train_preds = model.predict(X_train)\n",
    "\n",
    "        this_sum_diff_train, this_sum_real_train = compute_sum_diff_and_sum_real(y_train, train_preds)\n",
    "        sum_diff_train += this_sum_diff_train\n",
    "        sum_real_train += this_sum_real_train\n",
    "\n",
    "        models[col_y][product_pid] = model\n",
    "\n",
    "wmape_train = sum_diff_train/sum_real_train\n",
    "print(f'wmape on train set: {wmape_train}')\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Produce predictions for the test set.\n",
    "for product_pid in list_product_pid:\n",
    "    data_this_product_train_and_valid = data_train_and_valid[data_train_and_valid['product_pid']==product_pid]\n",
    "    data_this_product_test = data_test[data_test['product_pid']==product_pid].copy()\n",
    "    # Make sure every feature chosen at fit time also exists in the test frame.\n",
    "    # Missing exogenous columns are proxied by the product's most recent\n",
    "    # training values. Use .to_numpy(): assigning a raw Series slice would be\n",
    "    # aligned on the index, and because the train and test frames come from\n",
    "    # different CSVs their indices differ, silently producing NaNs.\n",
    "    n_test = len(data_this_product_test)\n",
    "    for col_x in col_names:\n",
    "        if col_x not in data_this_product_test.columns:\n",
    "            data_this_product_test[col_x] = data_this_product_train_and_valid[col_x].iloc[-n_test:].to_numpy()\n",
    "\n",
    "    X_test_apply = data_this_product_test[dict_helpful_features['apply_amt'][product_pid]]\n",
    "    pred_apply = models['apply_amt'][product_pid].predict(X_test_apply)\n",
    "\n",
    "    X_test_redeem = data_this_product_test[dict_helpful_features['redeem_amt'][product_pid]]\n",
    "    pred_redeem = models['redeem_amt'][product_pid].predict(X_test_redeem)\n",
    "\n",
    "    X_test_net = data_this_product_test[dict_helpful_features['net_in_amt'][product_pid]]\n",
    "    pred_net = models['net_in_amt'][product_pid].predict(X_test_net)\n",
    "\n",
    "    # Write the predictions back into the corresponding rows of data_test.\n",
    "    data_test.loc[data_test['product_pid']==product_pid,\"apply_amt_pred\"] = pred_apply\n",
    "    data_test.loc[data_test['product_pid']==product_pid,\"redeem_amt_pred\"] = pred_redeem\n",
    "    data_test.loc[data_test['product_pid']==product_pid,\"net_in_amt_pred\"] = pred_net\n",
    "# Blend: average the direct apply prediction with the one implied by the\n",
    "# identity apply = net_in + redeem.\n",
    "data_test[\"apply_amt_pred\"] = (data_test[\"net_in_amt_pred\"]+data_test[\"redeem_amt_pred\"])*0.5+data_test[\"apply_amt_pred\"]*0.5\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the test-set predictions to a descriptively named CSV.\n",
    "way_feature = 'grangerTested'\n",
    "way_model = 'xgb'\n",
    "way_split = 'noValid'\n",
    "way_test = 'directRedeemNet'\n",
    "output_filename = f\"predict_table_feature-{way_feature}_model-{way_model}_split-{way_split}_test-{way_test}.csv\"\n",
    "data_test.to_csv(\n",
    "    PATH_TESTSET_PREDICTIONS + output_filename,\n",
    "    index=False,\n",
    "    columns=['product_pid', 'transaction_date', 'apply_amt_pred', 'redeem_amt_pred', 'net_in_amt_pred'],\n",
    ")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "AFAC2023ApplyAndRedeem_env",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.17"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
