{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4e5ee894-7cf1-46d7-a647-296107d3620c",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
     "import glob\n",
     "\n",
     "import numpy as np\n",
     "import pandas as pd\n",
     "from joblib import Parallel, delayed\n",
     "from sklearn.linear_model import LogisticRegression\n",
     "from tqdm import tqdm_notebook  # deprecated API, kept for back-compat\n",
     "from tqdm.auto import tqdm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d043820d-d2d6-4a64-92a7-13ec3f6fe6f4",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
     "# Collect every per-stock training CSV from the challenge dataset directory\n",
     "train_paths = glob.glob('./AI量化模型预测挑战赛公开数据/train/*')\n",
     "len(train_paths)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "dcb9bf29-f960-4a24-92e8-86aba70c6cfa",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
     "# Collect every per-stock test CSV (same layout as train, without label columns)\n",
     "test_paths = glob.glob('./AI量化模型预测挑战赛公开数据/test/*')\n",
     "len(test_paths)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 116,
   "id": "f6a9d44e-f6d2-4bc2-a1ab-08b7fc156186",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>n_close</th>\n",
       "      <th>amount_delta</th>\n",
       "      <th>n_midprice</th>\n",
       "      <th>n_bid1</th>\n",
       "      <th>n_bsize1</th>\n",
       "      <th>n_bid2</th>\n",
       "      <th>n_bsize2</th>\n",
       "      <th>n_bid3</th>\n",
       "      <th>n_bsize3</th>\n",
       "      <th>n_bid4</th>\n",
       "      <th>...</th>\n",
       "      <th>n_asize3</th>\n",
       "      <th>n_ask4</th>\n",
       "      <th>n_asize4</th>\n",
       "      <th>n_ask5</th>\n",
       "      <th>n_asize5</th>\n",
       "      <th>label_5</th>\n",
       "      <th>label_10</th>\n",
       "      <th>label_20</th>\n",
       "      <th>label_40</th>\n",
       "      <th>label_60</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>0.005973</td>\n",
       "      <td>990629.0</td>\n",
       "      <td>0.006720</td>\n",
       "      <td>0.006471</td>\n",
       "      <td>1.879378e-06</td>\n",
       "      <td>0.005973</td>\n",
       "      <td>0.000025</td>\n",
       "      <td>0.005475</td>\n",
       "      <td>0.000021</td>\n",
       "      <td>0.004978</td>\n",
       "      <td>...</td>\n",
       "      <td>0.000014</td>\n",
       "      <td>0.008462</td>\n",
       "      <td>0.000016</td>\n",
       "      <td>0.008960</td>\n",
       "      <td>0.000019</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>0.006471</td>\n",
       "      <td>444943.0</td>\n",
       "      <td>0.006720</td>\n",
       "      <td>0.006471</td>\n",
       "      <td>8.457202e-06</td>\n",
       "      <td>0.005973</td>\n",
       "      <td>0.000027</td>\n",
       "      <td>0.005475</td>\n",
       "      <td>0.000022</td>\n",
       "      <td>0.004978</td>\n",
       "      <td>...</td>\n",
       "      <td>0.000012</td>\n",
       "      <td>0.008462</td>\n",
       "      <td>0.000016</td>\n",
       "      <td>0.008960</td>\n",
       "      <td>0.000019</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>0.006471</td>\n",
       "      <td>1138480.0</td>\n",
       "      <td>0.006720</td>\n",
       "      <td>0.006471</td>\n",
       "      <td>3.924584e-06</td>\n",
       "      <td>0.005973</td>\n",
       "      <td>0.000031</td>\n",
       "      <td>0.005475</td>\n",
       "      <td>0.000022</td>\n",
       "      <td>0.004978</td>\n",
       "      <td>...</td>\n",
       "      <td>0.000012</td>\n",
       "      <td>0.008462</td>\n",
       "      <td>0.000016</td>\n",
       "      <td>0.008960</td>\n",
       "      <td>0.000019</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>0.005973</td>\n",
       "      <td>2052770.0</td>\n",
       "      <td>0.006222</td>\n",
       "      <td>0.005973</td>\n",
       "      <td>2.631130e-05</td>\n",
       "      <td>0.005475</td>\n",
       "      <td>0.000023</td>\n",
       "      <td>0.004978</td>\n",
       "      <td>0.000011</td>\n",
       "      <td>0.004480</td>\n",
       "      <td>...</td>\n",
       "      <td>0.000007</td>\n",
       "      <td>0.007964</td>\n",
       "      <td>0.000012</td>\n",
       "      <td>0.008462</td>\n",
       "      <td>0.000016</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>2</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>0.006471</td>\n",
       "      <td>3249679.0</td>\n",
       "      <td>0.006222</td>\n",
       "      <td>0.005973</td>\n",
       "      <td>1.934654e-05</td>\n",
       "      <td>0.005475</td>\n",
       "      <td>0.000023</td>\n",
       "      <td>0.004978</td>\n",
       "      <td>0.000011</td>\n",
       "      <td>0.004480</td>\n",
       "      <td>...</td>\n",
       "      <td>0.000007</td>\n",
       "      <td>0.007964</td>\n",
       "      <td>0.000013</td>\n",
       "      <td>0.008462</td>\n",
       "      <td>0.000016</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>2</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1994</th>\n",
       "      <td>-0.003982</td>\n",
       "      <td>104091.0</td>\n",
       "      <td>-0.003733</td>\n",
       "      <td>-0.003982</td>\n",
       "      <td>1.842528e-07</td>\n",
       "      <td>-0.004480</td>\n",
       "      <td>0.000007</td>\n",
       "      <td>-0.004978</td>\n",
       "      <td>0.000017</td>\n",
       "      <td>-0.005475</td>\n",
       "      <td>...</td>\n",
       "      <td>0.000024</td>\n",
       "      <td>-0.001991</td>\n",
       "      <td>0.000010</td>\n",
       "      <td>-0.001493</td>\n",
       "      <td>0.000023</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1995</th>\n",
       "      <td>-0.003982</td>\n",
       "      <td>236168.0</td>\n",
       "      <td>-0.004231</td>\n",
       "      <td>-0.004480</td>\n",
       "      <td>7.020030e-06</td>\n",
       "      <td>-0.004978</td>\n",
       "      <td>0.000017</td>\n",
       "      <td>-0.005475</td>\n",
       "      <td>0.000016</td>\n",
       "      <td>-0.005973</td>\n",
       "      <td>...</td>\n",
       "      <td>0.000022</td>\n",
       "      <td>-0.002489</td>\n",
       "      <td>0.000024</td>\n",
       "      <td>-0.001991</td>\n",
       "      <td>0.000010</td>\n",
       "      <td>1</td>\n",
       "      <td>2</td>\n",
       "      <td>1</td>\n",
       "      <td>2</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1996</th>\n",
       "      <td>-0.003982</td>\n",
       "      <td>20010.0</td>\n",
       "      <td>-0.004231</td>\n",
       "      <td>-0.004480</td>\n",
       "      <td>7.020030e-06</td>\n",
       "      <td>-0.004978</td>\n",
       "      <td>0.000017</td>\n",
       "      <td>-0.005475</td>\n",
       "      <td>0.000016</td>\n",
       "      <td>-0.005973</td>\n",
       "      <td>...</td>\n",
       "      <td>0.000022</td>\n",
       "      <td>-0.002489</td>\n",
       "      <td>0.000024</td>\n",
       "      <td>-0.001991</td>\n",
       "      <td>0.000011</td>\n",
       "      <td>1</td>\n",
       "      <td>2</td>\n",
       "      <td>1</td>\n",
       "      <td>2</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1997</th>\n",
       "      <td>-0.003982</td>\n",
       "      <td>1095195.0</td>\n",
       "      <td>-0.003733</td>\n",
       "      <td>-0.003982</td>\n",
       "      <td>1.670804e-06</td>\n",
       "      <td>-0.004480</td>\n",
       "      <td>0.000007</td>\n",
       "      <td>-0.004978</td>\n",
       "      <td>0.000017</td>\n",
       "      <td>-0.005475</td>\n",
       "      <td>...</td>\n",
       "      <td>0.000024</td>\n",
       "      <td>-0.001991</td>\n",
       "      <td>0.000011</td>\n",
       "      <td>-0.001493</td>\n",
       "      <td>0.000023</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1998</th>\n",
       "      <td>-0.003982</td>\n",
       "      <td>270101.0</td>\n",
       "      <td>-0.003733</td>\n",
       "      <td>-0.003982</td>\n",
       "      <td>3.587033e-06</td>\n",
       "      <td>-0.004480</td>\n",
       "      <td>0.000008</td>\n",
       "      <td>-0.004978</td>\n",
       "      <td>0.000017</td>\n",
       "      <td>-0.005475</td>\n",
       "      <td>...</td>\n",
       "      <td>0.000024</td>\n",
       "      <td>-0.001991</td>\n",
       "      <td>0.000011</td>\n",
       "      <td>-0.001493</td>\n",
       "      <td>0.000023</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>1999 rows × 28 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "       n_close  amount_delta  n_midprice    n_bid1      n_bsize1    n_bid2  \\\n",
       "0     0.005973      990629.0    0.006720  0.006471  1.879378e-06  0.005973   \n",
       "1     0.006471      444943.0    0.006720  0.006471  8.457202e-06  0.005973   \n",
       "2     0.006471     1138480.0    0.006720  0.006471  3.924584e-06  0.005973   \n",
       "3     0.005973     2052770.0    0.006222  0.005973  2.631130e-05  0.005475   \n",
       "4     0.006471     3249679.0    0.006222  0.005973  1.934654e-05  0.005475   \n",
       "...        ...           ...         ...       ...           ...       ...   \n",
       "1994 -0.003982      104091.0   -0.003733 -0.003982  1.842528e-07 -0.004480   \n",
       "1995 -0.003982      236168.0   -0.004231 -0.004480  7.020030e-06 -0.004978   \n",
       "1996 -0.003982       20010.0   -0.004231 -0.004480  7.020030e-06 -0.004978   \n",
       "1997 -0.003982     1095195.0   -0.003733 -0.003982  1.670804e-06 -0.004480   \n",
       "1998 -0.003982      270101.0   -0.003733 -0.003982  3.587033e-06 -0.004480   \n",
       "\n",
       "      n_bsize2    n_bid3  n_bsize3    n_bid4  ...  n_asize3    n_ask4  \\\n",
       "0     0.000025  0.005475  0.000021  0.004978  ...  0.000014  0.008462   \n",
       "1     0.000027  0.005475  0.000022  0.004978  ...  0.000012  0.008462   \n",
       "2     0.000031  0.005475  0.000022  0.004978  ...  0.000012  0.008462   \n",
       "3     0.000023  0.004978  0.000011  0.004480  ...  0.000007  0.007964   \n",
       "4     0.000023  0.004978  0.000011  0.004480  ...  0.000007  0.007964   \n",
       "...        ...       ...       ...       ...  ...       ...       ...   \n",
       "1994  0.000007 -0.004978  0.000017 -0.005475  ...  0.000024 -0.001991   \n",
       "1995  0.000017 -0.005475  0.000016 -0.005973  ...  0.000022 -0.002489   \n",
       "1996  0.000017 -0.005475  0.000016 -0.005973  ...  0.000022 -0.002489   \n",
       "1997  0.000007 -0.004978  0.000017 -0.005475  ...  0.000024 -0.001991   \n",
       "1998  0.000008 -0.004978  0.000017 -0.005475  ...  0.000024 -0.001991   \n",
       "\n",
       "      n_asize4    n_ask5  n_asize5  label_5  label_10  label_20  label_40  \\\n",
       "0     0.000016  0.008960  0.000019        1         0         1         0   \n",
       "1     0.000016  0.008960  0.000019        0         0         1         0   \n",
       "2     0.000016  0.008960  0.000019        0         0         1         1   \n",
       "3     0.000012  0.008462  0.000016        0         0         2         0   \n",
       "4     0.000013  0.008462  0.000016        0         0         2         0   \n",
       "...        ...       ...       ...      ...       ...       ...       ...   \n",
       "1994  0.000010 -0.001493  0.000023        1         1         1         1   \n",
       "1995  0.000024 -0.001991  0.000010        1         2         1         2   \n",
       "1996  0.000024 -0.001991  0.000011        1         2         1         2   \n",
       "1997  0.000011 -0.001493  0.000023        1         1         1         1   \n",
       "1998  0.000011 -0.001493  0.000023        1         1         1         1   \n",
       "\n",
       "      label_60  \n",
       "0            0  \n",
       "1            0  \n",
       "2            0  \n",
       "3            0  \n",
       "4            0  \n",
       "...        ...  \n",
       "1994         2  \n",
       "1995         2  \n",
       "1996         2  \n",
       "1997         1  \n",
       "1998         1  \n",
       "\n",
       "[1999 rows x 28 columns]"
      ]
     },
     "execution_count": 116,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Peek at one training file; drop the first 4 columns\n",
     "# (presumably identifier/time columns — TODO confirm against the raw CSV header)\n",
     "df = pd.read_csv(glob.glob('./AI量化模型预测挑战赛公开数据/train/*')[1])\n",
     "df = df.iloc[:, 4:]\n",
     "df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 117,
   "id": "c28a34f4-fb0f-4b76-ab6a-4be9cd67a081",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
     "# Same peek for one test file (note: this rebinds df, shadowing the train sample above)\n",
     "df = pd.read_csv(glob.glob('./AI量化模型预测挑战赛公开数据/test/*')[1])\n",
     "df = df.iloc[:, 4:]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 118,
   "id": "88ae1470-5fb1-4036-a74e-773230693d0d",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Index(['n_close', 'amount_delta', 'n_midprice', 'n_bid1', 'n_bsize1', 'n_bid2',\n",
       "       'n_bsize2', 'n_bid3', 'n_bsize3', 'n_bid4', 'n_bsize4', 'n_bid5',\n",
       "       'n_bsize5', 'n_ask1', 'n_asize1', 'n_ask2', 'n_asize2', 'n_ask3',\n",
       "       'n_asize3', 'n_ask4', 'n_asize4', 'n_ask5', 'n_asize5'],\n",
       "      dtype='object')"
      ]
     },
     "execution_count": 118,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df.columns"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 119,
   "id": "4a85e08b-7d7c-4c2e-a482-b45087df5c01",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "def base_feature(path, train=True):\n",
    "    df_feat = []\n",
    "    df = pd.read_csv(path)\n",
    "    for idx, row in enumerate(df.iterrows()):\n",
    "        idx_feat = []\n",
    "        for col in ['n_close', 'amount_delta', 'n_midprice', 'n_bid1', 'n_bsize1', 'n_bid2',\n",
    "           'n_bsize2', 'n_bid3', 'n_bsize3', 'n_bid4', 'n_bsize4', 'n_bid5',\n",
    "           'n_bsize5', 'n_ask1', 'n_asize1', 'n_ask2', 'n_asize2', 'n_ask3',\n",
    "           'n_asize3', 'n_ask4', 'n_asize4', 'n_ask5', 'n_asize5']:\n",
    "\n",
    "            idx_feat.append(row[1][col])\n",
    "\n",
    "            if idx == 0:\n",
    "                idx_feat.append(np.nan)\n",
    "            else:\n",
    "                idx_feat.append(row[1][col] - df.iloc[max(0, idx-3):idx][col].mean())\n",
    "                idx_feat.append(row[1][col] - df.iloc[max(0, idx-10):idx][col].mean())\n",
    "                idx_feat.append(row[1][col] - df.iloc[max(0, idx-20):idx][col].mean())\n",
    "                \n",
    "                idx_feat.append(row[1][col] - df.iloc[max(0, idx-3):idx][col].max())\n",
    "                idx_feat.append(row[1][col] - df.iloc[max(0, idx-10):idx][col].max())\n",
    "                idx_feat.append(row[1][col] - df.iloc[max(0, idx-20):idx][col].max())\n",
    "\n",
    "        if train:\n",
    "            idx_feat += list(row[1].iloc[-5:])\n",
    "        \n",
    "        df_feat.append(idx_feat)\n",
    "    \n",
    "    return pd.DataFrame(df_feat)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2d63c860-47d6-4413-bf01-f61d9c91604a",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/tmp/ipykernel_16553/2475458747.py:1: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0\n",
      "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`\n",
      "  train_feat = Parallel(n_jobs=5)(delayed(base_feature)(path, True) for path in tqdm_notebook(train_paths[:]))\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "db8076fc350545b2bb7e94543dc4f546",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/1225 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "train_feat = Parallel(n_jobs=5)(delayed(base_feature)(path, True) for path in tqdm_notebook(train_paths[:]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 76,
   "id": "23076d32-3638-41bc-b313-315264938043",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/tmp/ipykernel_16553/1375492100.py:1: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0\n",
      "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`\n",
      "  test_feat = Parallel(n_jobs=5)(delayed(base_feature)(path, False) for path in tqdm_notebook(test_paths[:]))\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "6590a5ec0d0242f588b6fb4a0175ebd2",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/296 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "test_feat = Parallel(n_jobs=5)(delayed(base_feature)(path, False) for path in tqdm_notebook(test_paths[:]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 77,
   "id": "ff0fddf5-6e0b-4388-b1dd-b34642d11d06",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/tmp/ipykernel_16553/672406464.py:1: FutureWarning: In a future version of pandas all arguments of concat except for the argument 'objs' will be keyword-only\n",
      "  train_feat = pd.concat(train_feat, 0)\n"
     ]
    }
   ],
   "source": [
    "train_feat = pd.concat(train_feat, 0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 88,
   "id": "d824a7f8-6ea7-4fe1-bd26-76eba9bee192",
   "metadata": {
    "scrolled": true,
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.205441 seconds.\n",
      "You can set `force_col_wise=true` to remove the overhead.\n",
      "[LightGBM] [Info] Total Bins 11730\n",
      "[LightGBM] [Info] Number of data points in the train set: 1959020, number of used features: 46\n",
      "[LightGBM] [Info] Start training from score -1.906815\n",
      "[LightGBM] [Info] Start training from score -0.353174\n",
      "[LightGBM] [Info] Start training from score -1.903860\n",
      "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.198348 seconds.\n",
      "You can set `force_col_wise=true` to remove the overhead.\n",
      "[LightGBM] [Info] Total Bins 11730\n",
      "[LightGBM] [Info] Number of data points in the train set: 1959020, number of used features: 46\n",
      "[LightGBM] [Info] Start training from score -1.906815\n",
      "[LightGBM] [Info] Start training from score -0.353174\n",
      "[LightGBM] [Info] Start training from score -1.903860\n",
      "[LightGBM] [Warning] Auto-choosing row-wise multi-threading, the overhead of testing was 0.035309 seconds.\n",
      "You can set `force_row_wise=true` to remove the overhead.\n",
      "And if memory is not enough, you can set `force_col_wise=true`.\n",
      "[LightGBM] [Info] Total Bins 11730\n",
      "[LightGBM] [Info] Number of data points in the train set: 1959020, number of used features: 46\n",
      "[LightGBM] [Info] Start training from score -1.906811\n",
      "[LightGBM] [Info] Start training from score -0.353175\n",
      "[LightGBM] [Info] Start training from score -1.903860\n",
      "[LightGBM] [Warning] Auto-choosing row-wise multi-threading, the overhead of testing was 0.035216 seconds.\n",
      "You can set `force_row_wise=true` to remove the overhead.\n",
      "And if memory is not enough, you can set `force_col_wise=true`.\n",
      "[LightGBM] [Info] Total Bins 11730\n",
      "[LightGBM] [Info] Number of data points in the train set: 1959020, number of used features: 46\n",
      "[LightGBM] [Info] Start training from score -1.906811\n",
      "[LightGBM] [Info] Start training from score -0.353175\n",
      "[LightGBM] [Info] Start training from score -1.903860\n",
      "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.195128 seconds.\n",
      "You can set `force_col_wise=true` to remove the overhead.\n",
      "[LightGBM] [Info] Total Bins 11730\n",
      "[LightGBM] [Info] Number of data points in the train set: 1959020, number of used features: 46\n",
      "[LightGBM] [Info] Start training from score -1.906815\n",
      "[LightGBM] [Info] Start training from score -0.353175\n",
      "[LightGBM] [Info] Start training from score -1.903857\n",
      "0.4581991983145662\n",
      "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.196961 seconds.\n",
      "You can set `force_col_wise=true` to remove the overhead.\n",
      "[LightGBM] [Info] Total Bins 11730\n",
      "[LightGBM] [Info] Number of data points in the train set: 1959020, number of used features: 46\n",
      "[LightGBM] [Info] Start training from score -1.568959\n",
      "[LightGBM] [Info] Start training from score -0.534056\n",
      "[LightGBM] [Info] Start training from score -1.582231\n",
      "[LightGBM] [Warning] Auto-choosing row-wise multi-threading, the overhead of testing was 0.035409 seconds.\n",
      "You can set `force_row_wise=true` to remove the overhead.\n",
      "And if memory is not enough, you can set `force_col_wise=true`.\n",
      "[LightGBM] [Info] Total Bins 11730\n",
      "[LightGBM] [Info] Number of data points in the train set: 1959020, number of used features: 46\n",
      "[LightGBM] [Info] Start training from score -1.568959\n",
      "[LightGBM] [Info] Start training from score -0.534056\n",
      "[LightGBM] [Info] Start training from score -1.582231\n",
      "[LightGBM] [Warning] Auto-choosing row-wise multi-threading, the overhead of testing was 0.036105 seconds.\n",
      "You can set `force_row_wise=true` to remove the overhead.\n",
      "And if memory is not enough, you can set `force_col_wise=true`.\n",
      "[LightGBM] [Info] Total Bins 11730\n",
      "[LightGBM] [Info] Number of data points in the train set: 1959020, number of used features: 46\n",
      "[LightGBM] [Info] Start training from score -1.568959\n",
      "[LightGBM] [Info] Start training from score -0.534056\n",
      "[LightGBM] [Info] Start training from score -1.582231\n",
      "[LightGBM] [Warning] Auto-choosing row-wise multi-threading, the overhead of testing was 0.036074 seconds.\n",
      "You can set `force_row_wise=true` to remove the overhead.\n",
      "And if memory is not enough, you can set `force_col_wise=true`.\n",
      "[LightGBM] [Info] Total Bins 11730\n",
      "[LightGBM] [Info] Number of data points in the train set: 1959020, number of used features: 46\n",
      "[LightGBM] [Info] Start training from score -1.568959\n",
      "[LightGBM] [Info] Start training from score -0.534057\n",
      "[LightGBM] [Info] Start training from score -1.582229\n",
      "[LightGBM] [Warning] Auto-choosing row-wise multi-threading, the overhead of testing was 0.041198 seconds.\n",
      "You can set `force_row_wise=true` to remove the overhead.\n",
      "And if memory is not enough, you can set `force_col_wise=true`.\n",
      "[LightGBM] [Info] Total Bins 11730\n",
      "[LightGBM] [Info] Number of data points in the train set: 1959020, number of used features: 46\n",
      "[LightGBM] [Info] Start training from score -1.568962\n",
      "[LightGBM] [Info] Start training from score -0.534056\n",
      "[LightGBM] [Info] Start training from score -1.582229\n",
      "0.4978202822880688\n",
      "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.209411 seconds.\n",
      "You can set `force_col_wise=true` to remove the overhead.\n",
      "[LightGBM] [Info] Total Bins 11730\n",
      "[LightGBM] [Info] Number of data points in the train set: 1959020, number of used features: 46\n",
      "[LightGBM] [Info] Start training from score -1.801672\n",
      "[LightGBM] [Info] Start training from score -0.401287\n",
      "[LightGBM] [Info] Start training from score -1.798666\n",
      "[LightGBM] [Warning] Auto-choosing row-wise multi-threading, the overhead of testing was 0.035133 seconds.\n",
      "You can set `force_row_wise=true` to remove the overhead.\n",
      "And if memory is not enough, you can set `force_col_wise=true`.\n",
      "[LightGBM] [Info] Total Bins 11730\n",
      "[LightGBM] [Info] Number of data points in the train set: 1959020, number of used features: 46\n",
      "[LightGBM] [Info] Start training from score -1.801672\n",
      "[LightGBM] [Info] Start training from score -0.401288\n",
      "[LightGBM] [Info] Start training from score -1.798663\n",
      "[LightGBM] [Warning] Auto-choosing row-wise multi-threading, the overhead of testing was 0.035520 seconds.\n",
      "You can set `force_row_wise=true` to remove the overhead.\n",
      "And if memory is not enough, you can set `force_col_wise=true`.\n",
      "[LightGBM] [Info] Total Bins 11730\n",
      "[LightGBM] [Info] Number of data points in the train set: 1959020, number of used features: 46\n",
      "[LightGBM] [Info] Start training from score -1.801675\n",
      "[LightGBM] [Info] Start training from score -0.401287\n",
      "[LightGBM] [Info] Start training from score -1.798663\n",
      "[LightGBM] [Warning] Auto-choosing row-wise multi-threading, the overhead of testing was 0.035307 seconds.\n",
      "You can set `force_row_wise=true` to remove the overhead.\n",
      "And if memory is not enough, you can set `force_col_wise=true`.\n",
      "[LightGBM] [Info] Total Bins 11730\n",
      "[LightGBM] [Info] Number of data points in the train set: 1959020, number of used features: 46\n",
      "[LightGBM] [Info] Start training from score -1.801675\n",
      "[LightGBM] [Info] Start training from score -0.401287\n",
      "[LightGBM] [Info] Start training from score -1.798663\n",
      "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.188419 seconds.\n",
      "You can set `force_col_wise=true` to remove the overhead.\n",
      "[LightGBM] [Info] Total Bins 11730\n",
      "[LightGBM] [Info] Number of data points in the train set: 1959020, number of used features: 46\n",
      "[LightGBM] [Info] Start training from score -1.801675\n",
      "[LightGBM] [Info] Start training from score -0.401287\n",
      "[LightGBM] [Info] Start training from score -1.798663\n",
      "0.4222548337638055\n",
      "[LightGBM] [Warning] Auto-choosing row-wise multi-threading, the overhead of testing was 0.035372 seconds.\n",
      "You can set `force_row_wise=true` to remove the overhead.\n",
      "And if memory is not enough, you can set `force_col_wise=true`.\n",
      "[LightGBM] [Info] Total Bins 11730\n",
      "[LightGBM] [Info] Number of data points in the train set: 1959020, number of used features: 46\n",
      "[LightGBM] [Info] Start training from score -1.444756\n",
      "[LightGBM] [Info] Start training from score -0.628979\n",
      "[LightGBM] [Info] Start training from score -1.465076\n",
      "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.205554 seconds.\n",
      "You can set `force_col_wise=true` to remove the overhead.\n",
      "[LightGBM] [Info] Total Bins 11730\n",
      "[LightGBM] [Info] Number of data points in the train set: 1959020, number of used features: 46\n",
      "[LightGBM] [Info] Start training from score -1.444756\n",
      "[LightGBM] [Info] Start training from score -0.628980\n",
      "[LightGBM] [Info] Start training from score -1.465074\n",
      "[LightGBM] [Warning] Auto-choosing row-wise multi-threading, the overhead of testing was 0.035178 seconds.\n",
      "You can set `force_row_wise=true` to remove the overhead.\n",
      "And if memory is not enough, you can set `force_col_wise=true`.\n",
      "[LightGBM] [Info] Total Bins 11730\n",
      "[LightGBM] [Info] Number of data points in the train set: 1959020, number of used features: 46\n",
      "[LightGBM] [Info] Start training from score -1.444756\n",
      "[LightGBM] [Info] Start training from score -0.628980\n",
      "[LightGBM] [Info] Start training from score -1.465074\n",
      "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.196485 seconds.\n",
      "You can set `force_col_wise=true` to remove the overhead.\n",
      "[LightGBM] [Info] Total Bins 11730\n",
      "[LightGBM] [Info] Number of data points in the train set: 1959020, number of used features: 46\n",
      "[LightGBM] [Info] Start training from score -1.444756\n",
      "[LightGBM] [Info] Start training from score -0.628980\n",
      "[LightGBM] [Info] Start training from score -1.465074\n",
      "[LightGBM] [Warning] Auto-choosing row-wise multi-threading, the overhead of testing was 0.035650 seconds.\n",
      "You can set `force_row_wise=true` to remove the overhead.\n",
      "And if memory is not enough, you can set `force_col_wise=true`.\n",
      "[LightGBM] [Info] Total Bins 11730\n",
      "[LightGBM] [Info] Number of data points in the train set: 1959020, number of used features: 46\n",
      "[LightGBM] [Info] Start training from score -1.444756\n",
      "[LightGBM] [Info] Start training from score -0.628980\n",
      "[LightGBM] [Info] Start training from score -1.465074\n",
      "0.4210159047757482\n",
      "[LightGBM] [Warning] Auto-choosing row-wise multi-threading, the overhead of testing was 0.036300 seconds.\n",
      "You can set `force_row_wise=true` to remove the overhead.\n",
      "And if memory is not enough, you can set `force_col_wise=true`.\n",
      "[LightGBM] [Info] Total Bins 11730\n",
      "[LightGBM] [Info] Number of data points in the train set: 1959020, number of used features: 46\n",
      "[LightGBM] [Info] Start training from score -1.295829\n",
      "[LightGBM] [Info] Start training from score -0.780288\n",
      "[LightGBM] [Info] Start training from score -1.316563\n",
      "[LightGBM] [Warning] Auto-choosing row-wise multi-threading, the overhead of testing was 0.035602 seconds.\n",
      "You can set `force_row_wise=true` to remove the overhead.\n",
      "And if memory is not enough, you can set `force_col_wise=true`.\n",
      "[LightGBM] [Info] Total Bins 11730\n",
      "[LightGBM] [Info] Number of data points in the train set: 1959020, number of used features: 46\n",
      "[LightGBM] [Info] Start training from score -1.295829\n",
      "[LightGBM] [Info] Start training from score -0.780288\n",
      "[LightGBM] [Info] Start training from score -1.316563\n",
      "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.194618 seconds.\n",
      "You can set `force_col_wise=true` to remove the overhead.\n",
      "[LightGBM] [Info] Total Bins 11730\n",
      "[LightGBM] [Info] Number of data points in the train set: 1959020, number of used features: 46\n",
      "[LightGBM] [Info] Start training from score -1.295829\n",
      "[LightGBM] [Info] Start training from score -0.780288\n",
      "[LightGBM] [Info] Start training from score -1.316563\n",
      "[LightGBM] [Warning] Auto-choosing row-wise multi-threading, the overhead of testing was 0.037855 seconds.\n",
      "You can set `force_row_wise=true` to remove the overhead.\n",
      "And if memory is not enough, you can set `force_col_wise=true`.\n",
      "[LightGBM] [Info] Total Bins 11730\n",
      "[LightGBM] [Info] Number of data points in the train set: 1959020, number of used features: 46\n",
      "[LightGBM] [Info] Start training from score -1.295829\n",
      "[LightGBM] [Info] Start training from score -0.780288\n",
      "[LightGBM] [Info] Start training from score -1.316563\n",
      "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.202962 seconds.\n",
      "You can set `force_col_wise=true` to remove the overhead.\n",
      "[LightGBM] [Info] Total Bins 11730\n",
      "[LightGBM] [Info] Number of data points in the train set: 1959020, number of used features: 46\n",
      "[LightGBM] [Info] Start training from score -1.295827\n",
      "[LightGBM] [Info] Start training from score -0.780288\n",
      "[LightGBM] [Info] Start training from score -1.316565\n",
      "0.4304634960934304\n"
     ]
    }
   ],
   "source": [
    "import lightgbm as lgb\n",
    "from sklearn.model_selection import cross_val_predict\n",
    "from sklearn.metrics import f1_score\n",
    "\n",
    "val_pred = cross_val_predict(\n",
    "    lgb.LGBMClassifier(),\n",
    "    train_feat.iloc[:, :-5], train_feat.iloc[:, -5]\n",
    ")\n",
    "print(f1_score(train_feat.iloc[:, -5], val_pred, average='macro'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "60f01df3-185b-4d8c-b86a-77c8427945e0",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[LightGBM] [Warning] Auto-choosing row-wise multi-threading, the overhead of testing was 0.042072 seconds.\n",
      "You can set `force_row_wise=true` to remove the overhead.\n",
      "And if memory is not enough, you can set `force_col_wise=true`.\n",
      "[LightGBM] [Info] Total Bins 11730\n",
      "[LightGBM] [Info] Number of data points in the train set: 2448775, number of used features: 46\n",
      "[LightGBM] [Info] Start training from score -1.906813\n",
      "[LightGBM] [Info] Start training from score -0.353175\n",
      "[LightGBM] [Info] Start training from score -1.903860\n",
      "[LightGBM] [Warning] Auto-choosing row-wise multi-threading, the overhead of testing was 0.040548 seconds.\n",
      "You can set `force_row_wise=true` to remove the overhead.\n",
      "And if memory is not enough, you can set `force_col_wise=true`.\n",
      "[LightGBM] [Info] Total Bins 11730\n",
      "[LightGBM] [Info] Number of data points in the train set: 2448775, number of used features: 46\n",
      "[LightGBM] [Info] Start training from score -1.568960\n",
      "[LightGBM] [Info] Start training from score -0.534057\n",
      "[LightGBM] [Info] Start training from score -1.582230\n",
      "[LightGBM] [Warning] Auto-choosing row-wise multi-threading, the overhead of testing was 0.041424 seconds.\n",
      "You can set `force_row_wise=true` to remove the overhead.\n",
      "And if memory is not enough, you can set `force_col_wise=true`.\n",
      "[LightGBM] [Info] Total Bins 11730\n",
      "[LightGBM] [Info] Number of data points in the train set: 2448775, number of used features: 46\n",
      "[LightGBM] [Info] Start training from score -1.801673\n",
      "[LightGBM] [Info] Start training from score -0.401287\n",
      "[LightGBM] [Info] Start training from score -1.798664\n",
      "[LightGBM] [Warning] Auto-choosing row-wise multi-threading, the overhead of testing was 0.040565 seconds.\n",
      "You can set `force_row_wise=true` to remove the overhead.\n",
      "And if memory is not enough, you can set `force_col_wise=true`.\n",
      "[LightGBM] [Info] Total Bins 11730\n",
      "[LightGBM] [Info] Number of data points in the train set: 2448775, number of used features: 46\n",
      "[LightGBM] [Info] Start training from score -1.444756\n",
      "[LightGBM] [Info] Start training from score -0.628980\n",
      "[LightGBM] [Info] Start training from score -1.465074\n",
      "[LightGBM] [Warning] Auto-choosing row-wise multi-threading, the overhead of testing was 0.040407 seconds.\n",
      "You can set `force_row_wise=true` to remove the overhead.\n",
      "And if memory is not enough, you can set `force_col_wise=true`.\n",
      "[LightGBM] [Info] Total Bins 11730\n",
      "[LightGBM] [Info] Number of data points in the train set: 2448775, number of used features: 46\n",
      "[LightGBM] [Info] Start training from score -1.295828\n",
      "[LightGBM] [Info] Start training from score -0.780288\n",
      "[LightGBM] [Info] Start training from score -1.316564\n"
     ]
    }
   ],
   "source": [
    "m5 = lgb.LGBMClassifier()\n",
    "m5.fit(train_feat.iloc[:, :-5], train_feat.iloc[:, -5])\n",
    "\n",
    "m10 = lgb.LGBMClassifier()\n",
    "m10.fit(train_feat.iloc[:, :-5], train_feat.iloc[:, -4])\n",
    "\n",
    "m20 = lgb.LGBMClassifier()\n",
    "m20.fit(train_feat.iloc[:, :-5], train_feat.iloc[:, -3])\n",
    "\n",
    "m40 = lgb.LGBMClassifier()\n",
    "m40.fit(train_feat.iloc[:, :-5], train_feat.iloc[:, -2])\n",
    "\n",
    "m60 = lgb.LGBMClassifier()\n",
    "m60.fit(train_feat.iloc[:, :-5], train_feat.iloc[:, -1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 102,
   "id": "cd1873b5-f29e-4ab8-a3f9-39dedfdce4f3",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "# !mkdir submit"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 105,
   "id": "98db6a22-c075-41d7-ae29-c503df41d527",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "for df, path in zip(test_feat, test_paths):\n",
    "    sub = pd.DataFrame({\n",
    "        'uuid': range(len(df)),\n",
    "        'label_5': m5.predict(df),\n",
    "        'label_10': m10.predict(df),\n",
    "        'label_20': m20.predict(df),\n",
    "        'label_40': m40.predict(df),\n",
    "        'label_60': m60.predict(df)\n",
    "    })\n",
    "    sub.to_csv('./submit/' + path.split('/')[-1], index=None)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1883b3f4-98eb-40f7-8912-7ffbf772188f",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3.10"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.10"
  },
  "widgets": {
   "application/vnd.jupyter.widget-state+json": {
    "state": {},
    "version_major": 2,
    "version_minor": 0
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
