{
 "cells": [
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# [Title&Link]()\n",
    "\n",
    "xxx 对该任务的简单介绍\n",
    "\n",
    "## 1. 配置参数及查看显卡"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Path configuration (fill in the dataset locations before running)\n",
    "TRAIN_PATH = \"训练集路径\"  # path to the training CSV (placeholder)\n",
    "TEST_PATH = \"测试集路径\"  # path to the test CSV (placeholder)\n",
    "MODEL_SAVE_PATH = \"./model\"  # directory for saved checkpoints\n",
    "RESULT_SAVE_PATH = \"./result\"  # directory for prediction CSVs\n",
    "\n",
    "# MyRnn -- model hyper-parameters\n",
    "HIDDEN_SIZE = 100  # hidden units per RNN layer\n",
    "NUM_LAYERS = 4  # number of stacked RNN layers\n",
    "\n",
    "# MyLstm -- placeholder for additional model configurations\n",
    "\n",
    "# Training hyper-parameters\n",
    "VALID_RATE = 0.2  # fraction of the training data held out for validation\n",
    "BATCH_SIZE = 256\n",
    "NUM_EPOCHS = 1000\n",
    "LEARNING_RATE = 1e-4\n",
    "LOW_BOUND = 1e-6  # lower bound for the learning rate during decay\n",
    "EASY_STOP = 50  # early-stopping patience (epochs without improvement)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "!nvidia-smi"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 2. 导入需要的包"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import gc\n",
    "import os\n",
    "import copy\n",
    "import torch\n",
    "import datetime\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "from torch import nn\n",
    "from tqdm import tqdm\n",
    "from torch.utils.data import random_split, Dataset, DataLoader"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3. 数据预处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train_valid_split(dataset: np.ndarray, valid_ratio: float):\n",
    "    \"\"\" 分割训练集和验证集 \"\"\"\n",
    "    valid_size = int(len(dataset) * valid_ratio)\n",
    "    train_size = len(dataset) - valid_size\n",
    "    train, valid = random_split(dataset, [train_size, valid_size])\n",
    "    return np.array(train), np.array(valid)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class CovidDataset(Dataset):\n",
    "    \"\"\" 数据集 \"\"\"\n",
    "    def __init__(self, data: np.ndarray, data_flag=\"train\") -> None:\n",
    "        super().__init__()\n",
    "        self.data = data\n",
    "        self.data_flag = data_flag\n",
    "    \n",
    "    def __len__(self):\n",
    "        return len(self.data)\n",
    "    \n",
    "    def __getitem__(self, idx):\n",
    "        return x, y"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train_data_loader():\n",
    "    \"\"\" Build shuffled DataLoaders for the train and validation splits. \"\"\"\n",
    "    raw = pd.read_csv(TRAIN_PATH).values\n",
    "    train_part, valid_part = train_valid_split(raw, VALID_RATE)\n",
    "    def as_loader(split):\n",
    "        # Both splits carry labels, hence the \"train\" flag for each\n",
    "        return DataLoader(CovidDataset(split, \"train\"), batch_size=BATCH_SIZE, shuffle=True)\n",
    "    return as_loader(train_part), as_loader(valid_part)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def test_data_loader(batch_size=BATCH_SIZE):\n",
    "    \"\"\" Build a non-shuffled DataLoader over the test CSV. \"\"\"\n",
    "    dataset = CovidDataset(pd.read_csv(TEST_PATH).values, \"test\")\n",
    "    return DataLoader(dataset, batch_size=batch_size, shuffle=False)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 4. 模型的定义"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class MyRnn(nn.Module):\n",
    "    \"\"\" RNN-based regressor (skeleton -- layers not implemented yet).\n",
    "\n",
    "    NOTE(review): __init__ currently ignores hidden_size and forward returns\n",
    "    its input unchanged; fill in the layers before training.\n",
    "    \"\"\"\n",
    "    def __init__(self, hidden_size: int) -> None:\n",
    "        super().__init__()\n",
    "        pass  # TODO: build recurrent + output layers (see HIDDEN_SIZE, NUM_LAYERS)\n",
    "\n",
    "    def forward(self, x):\n",
    "        pass  # TODO: run x through the network\n",
    "        return x"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 5. 训练模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 1. Prepare the data loaders\n",
    "train_loader, valid_loader = train_data_loader()\n",
    "\n",
    "# 2. Training setup\n",
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "print(\"train device: \", device)\n",
    "net = MyRnn(HIDDEN_SIZE).to(device)  # bug fix: use the configured constant instead of a magic 100\n",
    "criterion = nn.MSELoss(reduction=\"mean\")\n",
    "optimizer = torch.optim.Adam(net.parameters(), lr=LEARNING_RATE)\n",
    "# best_lost: lowest validation loss so far; best_net: deep copy of that model;\n",
    "# easy_stop_count: epochs since the last improvement (early-stopping patience)\n",
    "best_lost, best_net, easy_stop_count = float(\"inf\"), None, 0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 3. Run the training loop\n",
    "for epoch in range(NUM_EPOCHS):\n",
    "    # --- training phase ---\n",
    "    net.train()\n",
    "    loss_record = []\n",
    "    train_pbar = tqdm(train_loader, position=0, leave=True)  # per-epoch progress bar\n",
    "    train_pbar.set_description(f\"Epoch {epoch + 1}/{NUM_EPOCHS}\")\n",
    "\n",
    "    for x, y in train_pbar:\n",
    "        optimizer.zero_grad()  # clear accumulated gradients\n",
    "        x, y = x.to(device), y.to(device)\n",
    "        y = y.reshape(-1, 1)  # match y_hat's (batch, 1) shape for MSELoss\n",
    "        y_hat = net(x)\n",
    "        loss = criterion(y_hat, y)  # compute the loss\n",
    "        loss.backward()  # backpropagate\n",
    "        optimizer.step()  # update the parameters\n",
    "        loss_record.append(loss.item())\n",
    "        \n",
    "        train_pbar.set_postfix({\"loss\": loss.item()})\n",
    "    \n",
    "    mean_train_loss = sum(loss_record) / len(loss_record)\n",
    "\n",
    "    # --- validation phase ---\n",
    "    net.eval()\n",
    "    loss_record = []\n",
    "    valid_pbar = tqdm(valid_loader, position=0, leave=True)\n",
    "    valid_pbar.set_description(f\"Epoch {epoch + 1}/{NUM_EPOCHS}\")\n",
    "\n",
    "    for x, y in valid_pbar:\n",
    "        x, y = x.to(device), y.to(device)\n",
    "        y = y.reshape(-1, 1)\n",
    "        with torch.no_grad():\n",
    "            y_hat = net(x)\n",
    "            loss = criterion(y_hat, y)\n",
    "            loss_record.append(loss.item())\n",
    "\n",
    "            valid_pbar.set_postfix({\"loss\": loss.item()})\n",
    "    \n",
    "    # Mean losses for this epoch\n",
    "    mean_valid_loss = sum(loss_record) / len(loss_record)\n",
    "    print(f\"Epoch {epoch + 1}/{NUM_EPOCHS}, train loss: {mean_train_loss}, valid loss: {mean_valid_loss}\")\n",
    "\n",
    "    # Track the best model ('best_lost' spelling kept to match the setup cell)\n",
    "    if mean_valid_loss < best_lost:\n",
    "        best_lost, best_net = mean_valid_loss, copy.deepcopy(net)\n",
    "        easy_stop_count = 0\n",
    "    else:\n",
    "        easy_stop_count += 1\n",
    "    \n",
    "    # Early stop after EASY_STOP epochs without improvement\n",
    "    if easy_stop_count >= EASY_STOP:\n",
    "        print(f\"Early stop at epoch {epoch + 1}/{NUM_EPOCHS}\")\n",
    "        break\n",
    "\n",
    "    # Decay the LR by 10x after 30 stagnant epochs (magic 30 -- consider a\n",
    "    # constant), stopping once the LR reaches LOW_BOUND; reset the patience\n",
    "    if optimizer.param_groups[0]['lr'] > LOW_BOUND and easy_stop_count >= 30:\n",
    "        optimizer.param_groups[0]['lr'] /= 10.0\n",
    "        print(f\"Adjust the learning rate {epoch + 1}/{NUM_EPOCHS} Current learning rate: {optimizer.param_groups[0]['lr']}\")\n",
    "        easy_stop_count = 0"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 6. 保存模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Make sure the checkpoint directory exists, then save the best weights\n",
    "if not os.path.exists(MODEL_SAVE_PATH):\n",
    "    os.makedirs(MODEL_SAVE_PATH)\n",
    "\n",
    "timestamp = datetime.datetime.now().strftime('%m-%d-%H-%M')\n",
    "checkpoint_file = os.path.join(MODEL_SAVE_PATH, f\"xxxxx-{timestamp}.pth\")\n",
    "torch.save(best_net.state_dict(), checkpoint_file)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Free training-time objects: kernel state persists across cells, so drop\n",
    "# the large references explicitly before the inference section.\n",
    "del train_loader, valid_loader\n",
    "del net, best_net\n",
    "torch.cuda.empty_cache()  # return cached GPU memory (no-op without CUDA)\n",
    "gc.collect()"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 7. 最终的预测"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 1. Prepare data (batch_size=1 so each prediction can be read with .item())\n",
    "test_loader = test_data_loader(batch_size=1)\n",
    "\n",
    "\n",
    "# 2. Load the trained weights\n",
    "net_name = \"xxxxxxx.pth\"  # TODO: set to the checkpoint written in section 6\n",
    "\n",
    "net = MyRnn(HIDDEN_SIZE)  # bug fix: use the configured constant instead of a magic 100\n",
    "net.load_state_dict(torch.load(\n",
    "    os.path.join(MODEL_SAVE_PATH, net_name),\n",
    "    map_location=\"cpu\"  # weights may have been saved from a CUDA model\n",
    "))\n",
    "net.eval()  # inference mode (disables dropout etc.)\n",
    "\n",
    "if not os.path.exists(RESULT_SAVE_PATH):\n",
    "    os.makedirs(RESULT_SAVE_PATH)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 3. Run inference and write one CSV row per test sample\n",
    "now_time = datetime.datetime.now().strftime(\"%m-%d-%H-%M\")\n",
    "test_pbar = tqdm(test_loader, position=0, leave=True)\n",
    "test_pbar.set_description(f\"Testing...\")\n",
    "\n",
    "with open(os.path.join(RESULT_SAVE_PATH, f\"xxxxx-{now_time}.csv\"), \"w\") as f:\n",
    "    f.write(\"id,tested_positive\\n\")\n",
    "    # Bug fix: inference must not build autograd graphs; also avoid\n",
    "    # shadowing the builtin `id`.\n",
    "    with torch.no_grad():\n",
    "        for row_id, x in enumerate(test_pbar):\n",
    "            y_hat = net(x)\n",
    "            # .item() relies on batch_size == 1 (set in section 7, step 1)\n",
    "            f.write(f\"{row_id},{y_hat.item()}\\n\")"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
