{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Please input the path to your top-level project folder\n",
    "Folder name: SUBMISSION MODEL"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "dir_ = \"INPUT-PROJECT-DIRECTORY/submission_model/\"  # input only here: path to the top-level 'SUBMISSION MODEL' folder (must end with '/')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Setting other directories"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Derive every working directory from the single user-supplied root ``dir_``.\n",
    "raw_data_dir = dir_ + \"2. data/\"  # raw competition files (sample_submission.csv, ...)\n",
    "processed_data_dir = dir_ + \"2. data/processed/\"  # pickled feature grids\n",
    "log_dir = dir_ + \"4. logs/\"\n",
    "model_dir = dir_ + \"5. models/\"  # trained LightGBM binaries\n",
    "submission_dir = dir_ + \"6. submissions/\"  # output CSVs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "####################################################################################\n",
    "####################### 1-2. recursive model by store & cat ########################\n",
    "####################################################################################"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# KKK shifts the training horizon: END_TRAIN = 1941 - 28 * KKK (see Vars cell).\n",
    "# KKK = 0 targets the final/private period; ``ver`` labels this configuration.\n",
    "ver, KKK = \"priv\", 0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# All store / category combinations; one model per (store, cat) pair is\n",
    "# loaded from ``model_dir`` during prediction.\n",
    "STORES = [\n",
    "    \"CA_1\",\n",
    "    \"CA_2\",\n",
    "    \"CA_3\",\n",
    "    \"CA_4\",\n",
    "    \"TX_1\",\n",
    "    \"TX_2\",\n",
    "    \"TX_3\",\n",
    "    \"WI_1\",\n",
    "    \"WI_2\",\n",
    "    \"WI_3\",\n",
    "]\n",
    "CATS = [\"HOBBIES\", \"HOUSEHOLD\", \"FOODS\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "_cell_guid": "b1076dfc-b9ad-4769-8c92-a6c4dae69d19",
    "_uuid": "8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
   },
   "outputs": [],
   "source": [
    "# General imports\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import os, sys, gc, time, warnings, pickle, psutil, random\n",
    "\n",
    "# custom imports\n",
    "from multiprocessing import Pool\n",
    "\n",
    "warnings.filterwarnings(\"ignore\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "########################### Helpers\n",
    "#################################################################################\n",
    "## Seeder\n",
    "def seed_everything(seed=0):\n",
    "    \"\"\"Seed the python and numpy RNGs for reproducibility.\"\"\"\n",
    "    random.seed(seed)\n",
    "    np.random.seed(seed)\n",
    "\n",
    "\n",
    "## Multiprocess Runs\n",
    "def df_parallelize_run(func, t_split):\n",
    "    \"\"\"Apply ``func`` to each element of ``t_split`` in parallel and\n",
    "    concatenate the resulting frames column-wise.\n",
    "\n",
    "    Uses at most N_CORES workers (module-level constant set in the Vars cell).\n",
    "    \"\"\"\n",
    "    num_cores = np.min([N_CORES, len(t_split)])\n",
    "    # Context manager guarantees worker cleanup even if ``func`` raises\n",
    "    # (the previous close()/join() pair leaked workers on exceptions).\n",
    "    with Pool(num_cores) as pool:\n",
    "        df = pd.concat(pool.map(func, t_split), axis=1)\n",
    "    return df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "########################### Helper to load data by store ID\n",
    "#################################################################################\n",
    "# Read data\n",
    "def get_data_by_store(store, dept):\n",
    "    \"\"\"Load the full feature frame for one (store_id, cat_id) pair.\n",
    "\n",
    "    Reads the pickled base / price / calendar grids, keeps rows with\n",
    "    d >= START_TRAIN for the requested store and category, then joins the\n",
    "    mean-encoding and lag features by index.\n",
    "\n",
    "    Returns (df, features): the assembled frame and the list of model\n",
    "    feature columns (everything not listed in ``remove_features``).\n",
    "    \"\"\"\n",
    "    # Read and concat the basic feature grids (aligned by position).\n",
    "    df = pd.concat(\n",
    "        [\n",
    "            pd.read_pickle(BASE),\n",
    "            pd.read_pickle(PRICE).iloc[:, 2:],\n",
    "            pd.read_pickle(CALENDAR).iloc[:, 2:],\n",
    "        ],\n",
    "        axis=1,\n",
    "    )\n",
    "\n",
    "    df = df[df[\"d\"] >= START_TRAIN]\n",
    "    df = df[(df[\"store_id\"] == store) & (df[\"cat_id\"] == dept)]\n",
    "\n",
    "    # Mean encodings and lag features are joined via the original index,\n",
    "    # which survives the row filtering above.\n",
    "    mean_enc_df = pd.read_pickle(MEAN_ENC)[mean_features]\n",
    "    mean_enc_df = mean_enc_df[mean_enc_df.index.isin(df.index)]\n",
    "\n",
    "    lags_df = pd.read_pickle(LAGS).iloc[:, 3:]\n",
    "    lags_df = lags_df[lags_df.index.isin(df.index)]\n",
    "\n",
    "    df = pd.concat([df, mean_enc_df], axis=1)\n",
    "    del mean_enc_df\n",
    "\n",
    "    df = pd.concat([df, lags_df], axis=1)\n",
    "    del lags_df\n",
    "\n",
    "    features = [col for col in list(df) if col not in remove_features]\n",
    "    df = df[[\"id\", \"d\", TARGET] + features]\n",
    "    df = df.reset_index(drop=True)\n",
    "\n",
    "    return df, features\n",
    "\n",
    "\n",
    "# Recombine Test set after training\n",
    "def get_base_test():\n",
    "    \"\"\"Concatenate every per-(store, cat) test pickle into one frame.\"\"\"\n",
    "    # Collect pieces in a list and concat once -- avoids the quadratic cost\n",
    "    # of growing a DataFrame with pd.concat inside the loop.\n",
    "    pieces = []\n",
    "    for store_id in STORES:\n",
    "        for cat_id in CATS:\n",
    "            temp_df = pd.read_pickle(\n",
    "                processed_data_dir + \"test_\" + store_id + \"_\" + cat_id + \".pkl\"\n",
    "            )\n",
    "            temp_df[\"store_id\"] = store_id\n",
    "            temp_df[\"cat_id\"] = cat_id\n",
    "            pieces.append(temp_df)\n",
    "\n",
    "    return pd.concat(pieces).reset_index(drop=True)\n",
    "\n",
    "\n",
    "########################### Helper to make dynamic rolling lags\n",
    "#################################################################################\n",
    "def make_lag(LAG_DAY):\n",
    "    \"\"\"Return a one-column frame: TARGET shifted by LAG_DAY within each id.\"\"\"\n",
    "    # .copy() so the new column is not written onto a slice of the global\n",
    "    # ``base_test`` (SettingWithCopyWarning; silently broken under pandas\n",
    "    # copy-on-write).\n",
    "    lag_df = base_test[[\"id\", \"d\", TARGET]].copy()\n",
    "    col_name = \"sales_lag_\" + str(LAG_DAY)\n",
    "    lag_df[col_name] = (\n",
    "        lag_df.groupby([\"id\"])[TARGET]\n",
    "        .transform(lambda x: x.shift(LAG_DAY))\n",
    "        .astype(np.float16)\n",
    "    )\n",
    "    return lag_df[[col_name]]\n",
    "\n",
    "\n",
    "def make_lag_roll(LAG_DAY):\n",
    "    \"\"\"Return rolling mean of TARGET shifted by LAG_DAY[0] over a\n",
    "    LAG_DAY[1]-day window, computed within each id.\n",
    "    \"\"\"\n",
    "    shift_day, roll_wind = LAG_DAY\n",
    "    lag_df = base_test[[\"id\", \"d\", TARGET]].copy()  # see make_lag: avoid slice write\n",
    "    col_name = \"rolling_mean_tmp_\" + str(shift_day) + \"_\" + str(roll_wind)\n",
    "    lag_df[col_name] = lag_df.groupby([\"id\"])[TARGET].transform(\n",
    "        lambda x: x.shift(shift_day).rolling(roll_wind).mean()\n",
    "    )\n",
    "    return lag_df[[col_name]]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "########################### Model params\n",
    "#################################################################################\n",
    "import lightgbm as lgb\n",
    "\n",
    "# LightGBM hyper-parameters shared by every (store, cat) model.\n",
    "lgb_params = {\n",
    "    \"boosting_type\": \"gbdt\",\n",
    "    \"objective\": \"tweedie\",\n",
    "    \"tweedie_variance_power\": 1.1,  # close to Poisson (power = 1.0)\n",
    "    \"metric\": \"rmse\",\n",
    "    \"subsample\": 0.5,  # row subsampling, applied every iteration (freq=1)\n",
    "    \"subsample_freq\": 1,\n",
    "    \"learning_rate\": 0.015,\n",
    "    \"num_leaves\": 2**8 - 1,\n",
    "    \"min_data_in_leaf\": 2**8 - 1,\n",
    "    \"feature_fraction\": 0.5,  # column subsampling per iteration\n",
    "    \"max_bin\": 100,\n",
    "    \"n_estimators\": 3000,\n",
    "    \"boost_from_average\": False,\n",
    "    \"verbose\": -1,  # silence per-iteration logging\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "########################### Vars\n",
    "#################################################################################\n",
    "VER = 1  # version suffix of the model binaries (lgb_model_<store>_<cat>_v<VER>.bin)\n",
    "SEED = 42  # single seed for python / numpy (and LightGBM below)\n",
    "seed_everything(SEED)\n",
    "lgb_params[\"seed\"] = SEED\n",
    "N_CORES = psutil.cpu_count()  # worker count used by df_parallelize_run\n",
    "\n",
    "\n",
    "# LIMITS and const\n",
    "TARGET = \"sales\"  # column the models predict\n",
    "START_TRAIN = 700  # earliest day kept when loading data (limits memory)\n",
    "END_TRAIN = 1941 - 28 * KKK  # last training day; KKK set in an earlier cell\n",
    "P_HORIZON = 28  # prediction horizon in days\n",
    "USE_AUX = False  # NOTE(review): if flipped to True, pred_q references an\n",
    "# AUX_MODELS name that is never defined in this notebook -> NameError\n",
    "\n",
    "# Columns excluded from the model feature list.\n",
    "remove_features = [\n",
    "    \"id\",\n",
    "    \"cat_id\",\n",
    "    \"state_id\",\n",
    "    \"store_id\",\n",
    "    \"date\",\n",
    "    \"wm_yr_wk\",\n",
    "    \"d\",\n",
    "    TARGET,\n",
    "]\n",
    "# Mean-encoding columns pulled from the MEAN_ENC pickle.\n",
    "mean_features = [\n",
    "    \"enc_store_id_dept_id_mean\",\n",
    "    \"enc_store_id_dept_id_std\",\n",
    "    \"enc_item_id_store_id_mean\",\n",
    "    \"enc_item_id_store_id_std\",\n",
    "]\n",
    "\n",
    "# Input pickle paths (produced by the feature-engineering notebooks).\n",
    "ORIGINAL = raw_data_dir\n",
    "BASE = processed_data_dir + \"grid_part_1.pkl\"\n",
    "PRICE = processed_data_dir + \"grid_part_2.pkl\"\n",
    "CALENDAR = processed_data_dir + \"grid_part_3.pkl\"\n",
    "LAGS = processed_data_dir + \"lags_df_28.pkl\"\n",
    "MEAN_ENC = processed_data_dir + \"mean_encoding_df.pkl\"\n",
    "\n",
    "\n",
    "# SPLITS for lags creation\n",
    "SHIFT_DAY = 28\n",
    "N_LAGS = 15\n",
    "LAGS_SPLIT = [col for col in range(SHIFT_DAY, SHIFT_DAY + N_LAGS)]\n",
    "ROLS_SPLIT = []  # (shift, window) pairs consumed by make_lag_roll\n",
    "for i in [1, 7, 14]:\n",
    "    for j in [7, 14, 30, 60]:\n",
    "        ROLS_SPLIT.append([i, j])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# The feature list depends only on the columns, so derive it from a single\n",
    "# (store, cat) slice and drop the frame immediately to free memory.\n",
    "_, MODEL_FEATURES = get_data_by_store(STORES[-1], CATS[-1])\n",
    "del _\n",
    "gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "def pred_q(quantile):\n",
    "    \"\"\"Recursively predict 28 days for one quantile and write a submission CSV.\n",
    "\n",
    "    Iterates day by day; each day's predictions are written back into the\n",
    "    global ``base_test`` so that the rolling-lag features of later days can\n",
    "    incorporate them (recursive forecasting).\n",
    "\n",
    "    NOTE(review): ``base_test`` is mutated in place, so successive calls for\n",
    "    different quantiles start from the previous quantile's filled values --\n",
    "    confirm this is intended, or reload ``base_test`` per quantile.\n",
    "    \"\"\"\n",
    "    print(quantile)\n",
    "    all_preds = pd.DataFrame()\n",
    "\n",
    "    main_time = time.time()\n",
    "\n",
    "    for PREDICT_DAY in range(1, 29):\n",
    "        print(\"Predict | Day:\", PREDICT_DAY)\n",
    "        start_time = time.time()\n",
    "\n",
    "        # Rebuild the dynamic rolling features EVERY day so they pick up the\n",
    "        # predictions written into ``base_test`` on previous days.  Building\n",
    "        # them once before the loop (as the original did) leaves the\n",
    "        # recursive features of later days stale or NaN.\n",
    "        grid_df = base_test.copy()\n",
    "        grid_df = pd.concat(\n",
    "            [grid_df, df_parallelize_run(make_lag_roll, ROLS_SPLIT)], axis=1\n",
    "        )\n",
    "\n",
    "        day_mask = base_test[\"d\"] == (END_TRAIN + PREDICT_DAY)\n",
    "\n",
    "        for store_id in STORES:\n",
    "            for cat_id in CATS:\n",
    "                model_path = (\n",
    "                    model_dir\n",
    "                    + \"lgb_model_\"\n",
    "                    + store_id\n",
    "                    + \"_\"\n",
    "                    + cat_id\n",
    "                    + \"_v\"\n",
    "                    + str(VER)\n",
    "                    + \".bin\"\n",
    "                )\n",
    "                if USE_AUX:\n",
    "                    # NOTE(review): AUX_MODELS is not defined anywhere in this\n",
    "                    # notebook; enabling USE_AUX would raise NameError.\n",
    "                    model_path = AUX_MODELS + model_path\n",
    "\n",
    "                # Context manager so the file handle is closed promptly.\n",
    "                with open(model_path, \"rb\") as f:\n",
    "                    estimator = pickle.load(f)\n",
    "\n",
    "                mask = (\n",
    "                    day_mask\n",
    "                    & (base_test[\"store_id\"] == store_id)\n",
    "                    & (base_test[\"cat_id\"] == cat_id)\n",
    "                )\n",
    "                print(\"starting to predict\")\n",
    "                # .loc avoids the chained-indexing write of the original,\n",
    "                # which only worked by accident and breaks under pandas\n",
    "                # copy-on-write.  The second predict argument is presumably\n",
    "                # the quantile level expected by the pickled estimator --\n",
    "                # TODO confirm against the training notebook.\n",
    "                base_test.loc[mask, TARGET] = estimator.predict(\n",
    "                    grid_df[mask][MODEL_FEATURES], float(quantile)\n",
    "                )\n",
    "\n",
    "        temp_df = base_test[day_mask][[\"id\", TARGET]]\n",
    "        temp_df.columns = [\"id\", \"F\" + str(PREDICT_DAY)]\n",
    "        if \"id\" in list(all_preds):\n",
    "            all_preds = all_preds.merge(temp_df, on=[\"id\"], how=\"left\")\n",
    "        else:\n",
    "            all_preds = temp_df.copy()\n",
    "\n",
    "        print(\n",
    "            \"#\" * 10,\n",
    "            \" %0.2f min round |\" % ((time.time() - start_time) / 60),\n",
    "            \" %0.2f min total |\" % ((time.time() - main_time) / 60),\n",
    "            \" %0.2f day sales |\" % (temp_df[\"F\" + str(PREDICT_DAY)].sum()),\n",
    "        )\n",
    "        del temp_df\n",
    "\n",
    "    all_preds = all_preds.reset_index(drop=True)\n",
    "\n",
    "    ########################### Export\n",
    "    #################################################################################\n",
    "    submission = pd.read_csv(ORIGINAL + \"sample_submission.csv\")[[\"id\"]]\n",
    "    submission = submission.merge(all_preds, on=[\"id\"], how=\"left\").fillna(0)\n",
    "    submission.to_csv(\n",
    "        submission_dir\n",
    "        + f\"before_ensemble/submission_kaggle_recursive_store_cat_{quantile}.csv\",\n",
    "        index=False,\n",
    "    )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the combined test frame once; pred_q mutates it in place.\n",
    "base_test = get_base_test()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.005\n",
      "Predict | Day: 1\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "##########  34.76 min round |  61.61 min total |  1000.00 day sales |\n",
      "Predict | Day: 3\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "##########  34.30 min round |  95.92 min total |  814.00 day sales |\n",
      "Predict | Day: 4\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "##########  34.21 min round |  130.13 min total |  734.00 day sales |\n",
      "Predict | Day: 5\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "##########  34.30 min round |  164.43 min total |  858.00 day sales |\n",
      "Predict | Day: 6\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "##########  34.16 min round |  232.81 min total |  394.00 day sales |\n",
      "Predict | Day: 8\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "##########  33.72 min round |  266.54 min total |  75.00 day sales |\n",
      "Predict | Day: 9\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "##########  33.46 min round |  300.00 min total |  90.00 day sales |\n",
      "Predict | Day: 10\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n",
      "starting to predict\n"
     ]
    }
   ],
   "source": [
    "# ``pred_q`` mutates the shared global ``base_test`` in place, so quantiles\n",
    "# cannot safely run concurrently.  The previous version created a fresh\n",
    "# ThreadPoolExecutor per quantile and submitted a single task inside\n",
    "# ``with`` (whose exit blocks until the task finishes), so it ran\n",
    "# sequentially anyway -- while silently discarding any exception raised in\n",
    "# pred_q, because the returned Future was never inspected.  A plain loop is\n",
    "# behaviourally equivalent and lets errors surface.\n",
    "for quantile in [\n",
    "    \"0.005\",\n",
    "    \"0.025\",\n",
    "    \"0.165\",\n",
    "    \"0.250\",\n",
    "    \"0.500\",\n",
    "    \"0.750\",\n",
    "    \"0.835\",\n",
    "    \"0.975\",\n",
    "    \"0.995\",\n",
    "]:\n",
    "    pred_q(quantile)"
   ]
  }
 ],
 "metadata": {
  "hide_input": false,
  "kernelspec": {
   "display_name": "conda_mxnet_p36",
   "language": "python",
   "name": "conda_mxnet_p36"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
