{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Read the data, run exploratory analysis, and train the baseline."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import xgboost as xgb\n",
    "from datetime import datetime\n",
    "from statsmodels.tsa.stattools import grangercausalitytests\n",
    "from statsmodels.tsa.seasonal import seasonal_decompose\n",
    "from statsmodels.tsa.stattools import adfuller\n",
    "\n",
    "import warnings\n",
    "\n",
    "warnings.filterwarnings(\"ignore\")\n",
    "\n",
    "# Input files and output directories.\n",
    "FILENAME_TESTSET = 'original/testSet_predict_table.csv'\n",
    "FILENAME_TRAIN_AND_VALID = 'original/product_info_simple_final_train.csv'\n",
    "FILENAME_YIELD = 'original/cbyieldcurve_info_final.csv'\n",
    "FILENAME_TIME_INFO = 'original/time_info_final.csv'\n",
    "PATH_TESTSET_PREDICTIONS = 'predict_tables/'\n",
    "PATH_PROCESSED_DATA = 'processed/'\n",
    "\n",
    "# Load the test set and parse transaction_date (yyyymmdd int) to datetime.\n",
    "data_test = pd.read_csv(FILENAME_TESTSET)\n",
    "data_test['formatted_transaction_date'] = pd.to_datetime(\n",
    "    data_test['transaction_date'].apply(lambda x: str(x)))\n",
    "\n",
    "# BUGFIX: data_train_and_valid was used below without ever being read;\n",
    "# load it from FILENAME_TRAIN_AND_VALID (constant existed but was unused).\n",
    "data_train_and_valid = pd.read_csv(FILENAME_TRAIN_AND_VALID)\n",
    "data_train_and_valid['formatted_transaction_date'] = pd.to_datetime(\n",
    "    data_train_and_valid['transaction_date'].apply(lambda x: str(x)))\n",
    "\n",
    "cbyieldcurve_info_final = pd.read_csv(FILENAME_YIELD)\n",
    "time_info_final = pd.read_csv(FILENAME_TIME_INFO)\n",
    "\n",
    "# Fill NaNs in total_net_value, per product.\n",
    "unique_pids = data_train_and_valid['product_pid'].unique()\n",
    "for pid in unique_pids:\n",
    "    mask = data_train_and_valid['product_pid'] == pid\n",
    "    if np.isnan(data_train_and_valid.loc[mask, 'total_net_value']).all():\n",
    "        # No non-NaN value exists for this product: set the whole group to 0.\n",
    "        data_train_and_valid.loc[mask, 'total_net_value'] = 0\n",
    "    else:\n",
    "        # Forward-fill then backward-fill within the product group.\n",
    "        # (.ffill()/.bfill() replace the deprecated fillna(method=...) form.)\n",
    "        data_train_and_valid.loc[mask, 'total_net_value'] = data_train_and_valid.loc[mask, 'total_net_value'].ffill().bfill()\n",
    "\n",
    "# Drop the during_days column: grangercausalitytests showed it does not\n",
    "# help predict any of the 3 target series.\n",
    "data_train_and_valid.drop('during_days', axis=1, inplace=True)\n",
    "\n",
    "# For each row, look up the matching yield value in cbyieldcurve_info_final\n",
    "# (enddate == transaction_date). NOTE(review): linear scan per row, O(n*m);\n",
    "# takes the first match via iloc[0] if enddate is duplicated.\n",
    "data_train_and_valid['yield'] = data_train_and_valid['transaction_date'].apply(\n",
    "    lambda x: cbyieldcurve_info_final[cbyieldcurve_info_final['enddate'] ==\n",
    "                                      int(x)].iloc[0]['yield'])\n",
    "\n",
    "# Look up rows in time_info_final with the same stat_date and copy the\n",
    "# calendar flags onto the training set.\n",
    "data_train_and_valid['is_week_end'] = data_train_and_valid[\n",
    "    'transaction_date'].apply(lambda x: time_info_final[time_info_final[\n",
    "        'stat_date'] == int(x)].iloc[0]['is_week_end'])\n",
    "data_train_and_valid['is_month_end'] = data_train_and_valid[\n",
    "    'transaction_date'].apply(lambda x: time_info_final[time_info_final[\n",
    "        'stat_date'] == int(x)].iloc[0]['is_month_end'])\n",
    "\n",
    "# Same calendar-flag lookup for the test set.\n",
    "data_test['is_week_end'] = data_test[\n",
    "    'transaction_date'].apply(lambda x: time_info_final[time_info_final[\n",
    "        'stat_date'] == int(x)].iloc[0]['is_week_end'])\n",
    "data_test['is_month_end'] = data_test[\n",
    "    'transaction_date'].apply(lambda x: time_info_final[time_info_final[\n",
    "        'stat_date'] == int(x)].iloc[0]['is_month_end'])\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Add num_days_from_base: whole days elapsed since the base date 2021-01-04.\n",
    "# BUGFIX: the original divided POSIX-timestamp differences by 86400 and\n",
    "# truncated with int(); in a DST-observing local timezone the difference can\n",
    "# be one hour short and truncate to the wrong day. Plain date arithmetic on\n",
    "# the parsed datetimes is exact and timezone-independent.\n",
    "date_base = datetime.strptime(str(20210104), \"%Y%m%d\")\n",
    "## To shift the base (e.g. to 2022-11-10, 675 days later), move date_base.\n",
    "transform_date = lambda x: (x - date_base).days\n",
    "data_train_and_valid['num_days_from_base']=data_train_and_valid['formatted_transaction_date'].apply(transform_date)\n",
    "data_test['num_days_from_base']=data_test['formatted_transaction_date'].apply(transform_date)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the feature-augmented train/valid and test sets locally.\n",
    "data_train_and_valid.to_csv(f'{PATH_PROCESSED_DATA}train_valid_set_combined.csv', index=False)\n",
    "data_test.to_csv(f'{PATH_PROCESSED_DATA}test_set_combined.csv', index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Count the number of distinct products in the test set.\n",
    "list_product_pid = data_test['product_pid'].unique()\n",
    "print('number of products:', len(list_product_pid))\n",
    "\n",
    "# Inspect the per-product sample-count distribution in the raw training set.\n",
    "num_examples=[]\n",
    "for product_pid in list_product_pid:\n",
    "    num_examples.append(len(data_train_and_valid[data_train_and_valid['product_pid']==product_pid]))\n",
    "print('product平均个数',np.mean(num_examples))\n",
    "print('product个数最多',max(num_examples))\n",
    "print('product个数最少',min(num_examples))\n",
    "\n",
    "# How far does the overall apply_amt mean deviate from the mean of the last\n",
    "# 10 samples of each product?\n",
    "deviations=[]\n",
    "for product_pid in list_product_pid:\n",
    "    data_this_product_train_and_valid=data_train_and_valid[data_train_and_valid['product_pid']==product_pid]\n",
    "    num_last_examples=10\n",
    "    mean_last_examples=np.mean(data_this_product_train_and_valid['apply_amt'][-num_last_examples:])\n",
    "    mean_all=np.mean(data_this_product_train_and_valid['apply_amt'])\n",
    "    # NOTE(review): divides by mean_last_examples — a zero tail mean gives inf.\n",
    "    percent=abs(mean_last_examples-mean_all)/mean_last_examples\n",
    "    print(f'{product_pid}的整体偏离最后{num_last_examples}的程度为',percent)\n",
    "    deviations.append(percent)\n",
    "print('最大偏离程度',max(deviations))\n",
    "print('最小偏离程度',min(deviations))\n",
    "plt.boxplot(deviations)\n",
    "# List the first transaction date of each product in the raw training set.\n",
    "times=[]\n",
    "for product_pid in list_product_pid:\n",
    "    time=data_train_and_valid[data_train_and_valid['product_pid']==product_pid].iloc[0,:]['formatted_transaction_date']\n",
    "    times.append(time)\n",
    "sorted(pd.Series(times).unique())\n",
    "# Plot the raw training-set time series for the first few products.\n",
    "def plot_time_series(x,\n",
    "                     y,\n",
    "                     df=None,\n",
    "                     title=\"\",\n",
    "                     xlabel='Date',\n",
    "                     ylabel='Value',\n",
    "                     dpi=100):\n",
    "    \"\"\"Draw a single red line plot of y against x and show it.\n",
    "\n",
    "    `df` is accepted for interface compatibility but is unused.\n",
    "    \"\"\"\n",
    "    plt.figure(figsize=(16, 5), dpi=dpi)\n",
    "    plt.plot(x, y, color='tab:red')\n",
    "    plt.gca().set(title=title, xlabel=xlabel, ylabel=ylabel)\n",
    "    plt.show()\n",
    "\n",
    "\n",
    "for product_pid in list_product_pid[:3]:\n",
    "    # The product slice does not depend on label_class, so compute it once\n",
    "    # instead of once per target column (loop-invariant hoisted).\n",
    "    data_this_product_train_and_valid = data_train_and_valid[\n",
    "        data_train_and_valid['product_pid'] == product_pid]\n",
    "    for label_class in ['apply_amt','redeem_amt','net_in_amt']:\n",
    "        plot_time_series(\n",
    "            x=data_this_product_train_and_valid['formatted_transaction_date'],\n",
    "            y=data_this_product_train_and_valid[label_class],\n",
    "            title=f'{product_pid} {label_class}',)\n",
    "\n",
    "# Plot seasonal decompositions of the raw training-set time series.\n",
    "for product_pid in list_product_pid[:3]:\n",
    "    for label_class in ['apply_amt','redeem_amt','net_in_amt']:\n",
    "        data_this_product_train_and_valid = data_train_and_valid[\n",
    "            data_train_and_valid['product_pid'] == product_pid]\n",
    "        ts = data_this_product_train_and_valid[label_class]\n",
    "        ts.index = data_this_product_train_and_valid[\n",
    "            'formatted_transaction_date']\n",
    "\n",
    "        plt.rcParams.update({'figure.figsize': (10, 10)})\n",
    "\n",
    "        # Multiplicative decomposition needs strictly positive values.\n",
    "        if not np.any(ts <= 0):\n",
    "            # Multiplicative Decomposition\n",
    "            result_mul = seasonal_decompose(ts,\n",
    "                                            model='multiplicative',\n",
    "                                            extrapolate_trend='freq',\n",
    "                                            period=5)\n",
    "            result_mul.plot().suptitle(\n",
    "                f'Multiplicative Decompose of {product_pid} {label_class}', fontsize=22)\n",
    "\n",
    "        # Additive Decomposition\n",
    "        result_add = seasonal_decompose(ts,\n",
    "                                        model='additive',\n",
    "                                        extrapolate_trend='freq',\n",
    "                                        period=5)\n",
    "        result_add.plot().suptitle(f'Additive Decompose of {product_pid} {label_class}',\n",
    "                                fontsize=22)\n",
    "\n",
    "        plt.show()\n",
    "\n",
    "# Check how many products have a stationary series (ADF test).\n",
    "num_stationary=0\n",
    "label_class='redeem_amt'\n",
    "\n",
    "for product_pid in list_product_pid:\n",
    "    # if product_pid in ['product1', 'product100', 'product99']:\n",
    "        data_this_product_train_and_valid = data_train_and_valid[data_train_and_valid['product_pid'] == product_pid]\n",
    "        ts = data_this_product_train_and_valid[label_class]\n",
    "        ts.index = data_this_product_train_and_valid['formatted_transaction_date']\n",
    "        if ts.max() == ts.min():\n",
    "            print(f'constant {label_class} Product: {product_pid}')\n",
    "            num_stationary += 1\n",
    "            continue\n",
    "        # Augmented Dickey-Fuller stationarity test.\n",
    "        result = adfuller(ts)\n",
    "        if result[1] < 0.05:\n",
    "            num_stationary += 1\n",
    "        # # Print the detailed results\n",
    "        # print(f'Product: {product_pid}')\n",
    "        # print('Results of Augmented Dickey-Fuller Test:')\n",
    "        # print(f'Test Statistic: {result[0]}')\n",
    "        # print(f'p-value: {result[1]}')\n",
    "        # print('Critical Values:')\n",
    "        # for key, value in result[4].items():\n",
    "        #     print(f'   {key}: {value}')\n",
    "print(f'The number of products that have stationary {label_class} is {num_stationary}')\n",
    "\n",
    "# Check whether other series help predict apply/redeem/net via Granger\n",
    "# causality at lag 1, counting how many products each column helps.\n",
    "\n",
    "cols_y = ['apply_amt', 'redeem_amt', 'net_in_amt']\n",
    "# BUGFIX: the original read `cols_x = cols_x`, a NameError on a fresh kernel.\n",
    "# Reconstructed as every numeric column except the targets and date-derived\n",
    "# identifiers — NOTE(review): confirm this matches the intended feature list.\n",
    "cols_x = [c for c in data_train_and_valid.select_dtypes(include=[np.number]).columns\n",
    "          if c not in cols_y + ['transaction_date', 'num_days_from_base']]\n",
    "for col_x in cols_x:\n",
    "    for col_y in cols_y:\n",
    "        num_is_helpful = 0\n",
    "        for product_pid in list_product_pid:\n",
    "            data_this_product_train_and_valid = data_train_and_valid[data_train_and_valid['product_pid'] == product_pid]\n",
    "\n",
    "            if data_this_product_train_and_valid[col_y].max() == data_this_product_train_and_valid[col_y].min():\n",
    "                # Constant y series: the Granger test is undefined, skip.\n",
    "                continue\n",
    "            if data_this_product_train_and_valid[col_x].max() == data_this_product_train_and_valid[col_x].min():\n",
    "                # Constant x series: skip as well.\n",
    "                continue\n",
    "            result = grangercausalitytests(data_this_product_train_and_valid[[col_y, col_x]], maxlag=1, verbose=False)\n",
    "            p_values_lag1 = [test[1] for test in result[1][0].values()]\n",
    "            if all(p < 0.05 for p in p_values_lag1):\n",
    "                num_is_helpful += 1\n",
    "        print(f'num of {col_x} products helpful for {col_y}:', num_is_helpful)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Evaluation metric: WMAPE.\n",
    "def wmape_score( actuals,predictions):\n",
    "    \"\"\"Weighted MAPE: sum(|pred - actual|) / sum(|actual|).\n",
    "\n",
    "    Degenerate case: when sum(|actual|) == 0, returns 0 if the absolute\n",
    "    error is also 0, otherwise returns 100.\n",
    "    \"\"\"\n",
    "    actual_values = list(actuals)\n",
    "    predicted_values = list(predictions)\n",
    "\n",
    "    abs_actual_total = sum(abs(value) for value in actual_values)\n",
    "\n",
    "    abs_error_total = 0.0\n",
    "    for idx in range(len(predicted_values)):\n",
    "        abs_error_total += abs(predicted_values[idx] - actual_values[idx])\n",
    "\n",
    "    # Zero denominator: compare the numerator instead of dividing.\n",
    "    if abs_actual_total == 0:\n",
    "        return 0 if abs_error_total == 0 else 100\n",
    "\n",
    "    return abs_error_total / abs_actual_total\n",
    "def compute_sum_diff_and_sum_real(actuals,predictions):\n",
    "    \"\"\"Return (sum of |pred - actual|, sum of |actual|) for pooled WMAPE.\"\"\"\n",
    "    actual_values = list(actuals)\n",
    "    predicted_values = list(predictions)\n",
    "\n",
    "    abs_actual_total = sum(abs(value) for value in actual_values)\n",
    "\n",
    "    abs_error_total = 0.0\n",
    "    for idx in range(len(predicted_values)):\n",
    "        abs_error_total += abs(predicted_values[idx] - actual_values[idx])\n",
    "\n",
    "    return abs_error_total, abs_actual_total"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Train one apply model and one redeem model per product.\n",
    "# A small grid search over eta/max_depth is scored by WMAPE on a held-out\n",
    "# tail of 5 samples per product; the best setting is then refit on the\n",
    "# training part. (The duplicated apply/redeem search logic was deduplicated\n",
    "# into a shared helper; the printed output is unchanged.)\n",
    "models_apply = {}\n",
    "models_redeem = {}\n",
    "\n",
    "sum_diff_train=0\n",
    "sum_real_train=0\n",
    "sum_diff_valid=0\n",
    "sum_real_valid=0\n",
    "\n",
    "\n",
    "def _fit_best_xgb(X_train, y_train, X_valid, y_valid):\n",
    "    \"\"\"Grid-search eta/max_depth by validation WMAPE, refit the best setting.\n",
    "\n",
    "    Returns (model, wmape_valid_best, (diff_valid, real_valid),\n",
    "    (diff_train, real_train)); the diff/real pairs feed the pooled WMAPE\n",
    "    accumulated across all products.\n",
    "    \"\"\"\n",
    "    wmape_best = 100  # sentinel upper bound: any real WMAPE below 100 wins\n",
    "    ## eta is the learning rate\n",
    "    for eta in [0.001, 0.01, 0.1, 0.3, 0.5]:\n",
    "        for max_depth in [3, 4, 5, 10, 50]:\n",
    "            model = xgb.XGBRegressor(eta=eta, max_depth=max_depth)\n",
    "            model.fit(X_train, y_train)\n",
    "            valid_preds = model.predict(X_valid)\n",
    "            wmape_valid = wmape_score(y_valid, valid_preds)\n",
    "            if wmape_valid < wmape_best:\n",
    "                wmape_best = wmape_valid\n",
    "                eta_best = eta\n",
    "                max_depth_best = max_depth\n",
    "                diff_valid, real_valid = compute_sum_diff_and_sum_real(y_valid, valid_preds)\n",
    "    # Refit the winning configuration and measure it on the training part.\n",
    "    model = xgb.XGBRegressor(eta=eta_best, max_depth=max_depth_best)\n",
    "    model.fit(X_train, y_train)\n",
    "    train_preds = model.predict(X_train)\n",
    "    diff_train, real_train = compute_sum_diff_and_sum_real(y_train, train_preds)\n",
    "    return model, wmape_best, (diff_valid, real_valid), (diff_train, real_train)\n",
    "\n",
    "\n",
    "for i, product_pid in enumerate(list_product_pid):\n",
    "    print(f'starting {product_pid}, the {i+1}th product')\n",
    "    data_this_product_train_and_valid = data_train_and_valid[data_train_and_valid['product_pid']==product_pid]\n",
    "    # Hold out the last 5 samples of each product as its validation set.\n",
    "    # (BUGFIX: the original comment claimed 10 samples while the code used 5.)\n",
    "    data_this_product_train = data_this_product_train_and_valid.iloc[:-5, :]\n",
    "    data_this_product_valid = data_this_product_train_and_valid.iloc[-5:, :]\n",
    "\n",
    "    X_train = data_this_product_train['num_days_from_base'].values.reshape(-1, 1)\n",
    "    X_valid = data_this_product_valid['num_days_from_base'].values.reshape(-1, 1)\n",
    "\n",
    "    # Same search for both targets; only the label column and output differ.\n",
    "    for target, target_models in [('apply_amt', models_apply), ('redeem_amt', models_redeem)]:\n",
    "        model, wmape_best, (this_sum_diff_valid, this_sum_real_valid), (this_sum_diff_train, this_sum_real_train) = _fit_best_xgb(\n",
    "            X_train, data_this_product_train[target],\n",
    "            X_valid, data_this_product_valid[target])\n",
    "\n",
    "        short_name = 'apply' if target == 'apply_amt' else 'redeem'\n",
    "        print(f'wmape on {short_name} of valid set: {wmape_best}')\n",
    "\n",
    "        sum_diff_valid += this_sum_diff_valid\n",
    "        sum_real_valid += this_sum_real_valid\n",
    "        sum_diff_train += this_sum_diff_train\n",
    "        sum_real_train += this_sum_real_train\n",
    "\n",
    "        target_models[product_pid] = model\n",
    "\n",
    "\n",
    "wmape_train=sum_diff_train/sum_real_train\n",
    "wmape_valid=sum_diff_valid/sum_real_valid\n",
    "print(f'wmape on train set: {wmape_train}')\n",
    "print(f'wmape on valid set: {wmape_valid}')\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Fit one direct model per (product, target) on all available data,\n",
    "# covering apply, redeem and net-in amounts.\n",
    "models = {}\n",
    "models['apply_amt'] = {}\n",
    "models['redeem_amt'] = {}\n",
    "models['net_in_amt'] = {}\n",
    "\n",
    "sum_diff_train = 0\n",
    "sum_real_train = 0\n",
    "cols_x = ['num_days_from_base', 'is_week_end']\n",
    "\n",
    "for product_index, product_pid in enumerate(list_product_pid):\n",
    "    print(f'starting {product_pid}, the {product_index+1}th product')\n",
    "    # Use every sample of this product for training (no validation split).\n",
    "    data_this_product_train = data_train_and_valid[data_train_and_valid['product_pid'] == product_pid]\n",
    "\n",
    "    X_train = data_this_product_train[cols_x]\n",
    "\n",
    "    for col_y in ['apply_amt', 'redeem_amt', 'net_in_amt']:\n",
    "        y_train = data_this_product_train[col_y]\n",
    "\n",
    "        model = xgb.XGBRegressor()\n",
    "        model.fit(X_train, y_train)\n",
    "        train_preds = model.predict(X_train)\n",
    "\n",
    "        this_sum_diff_train, this_sum_real_train = compute_sum_diff_and_sum_real(y_train, train_preds)\n",
    "        sum_diff_train += this_sum_diff_train\n",
    "        sum_real_train += this_sum_real_train\n",
    "\n",
    "        models[col_y][product_pid] = model\n",
    "\n",
    "wmape_train = sum_diff_train / sum_real_train\n",
    "print(f'wmape on train set: {wmape_train}')\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Produce predictions for the test set.\n",
    "for product_pid in list_product_pid:\n",
    "    product_mask = data_test['product_pid'] == product_pid\n",
    "    # The same feature matrix serves all three targets.\n",
    "    X_test = data_test.loc[product_mask, cols_x]\n",
    "\n",
    "    # Write one prediction column per target back into data_test.\n",
    "    for col_y in ['apply_amt', 'redeem_amt', 'net_in_amt']:\n",
    "        data_test.loc[product_mask, col_y + '_pred'] = models[col_y][product_pid].predict(X_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Save the test-set predictions locally, tagged with the experiment setup.\n",
    "way_feature = 'isWeekEnd'\n",
    "way_model = 'xgb'\n",
    "way_split = 'noValid'\n",
    "way_test = 'applyDirectRedeemDirectNetDirect'\n",
    "output_filename = (PATH_TESTSET_PREDICTIONS +\n",
    "                   f'predict_table_feature-{way_feature}_model-{way_model}_split-{way_split}_test-{way_test}.csv')\n",
    "data_test.to_csv(output_filename,\n",
    "                 index=False,\n",
    "                 columns=['product_pid', 'transaction_date', 'apply_amt_pred', 'redeem_amt_pred', 'net_in_amt_pred'])"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "AFAC2023ApplyAndRedeem_env",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.17"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
