{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "08bc2fb0",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import pandas as pd\n",
    "from tqdm.notebook import tqdm\n",
    "import numpy as np\n",
    "import os\n",
    "\n",
    "import torchvision\n",
    "from torchvision import datasets, models, transforms\n",
    "import torch.nn as nn\n",
    "from torch.nn import functional as F\n",
    "from torch.utils import data\n",
    "import torch.optim as optim\n",
    "\n",
    "from torchvision.io import read_image\n",
    "from PIL import Image\n",
    "from torch.utils.data import Dataset,DataLoader\n",
    "from torchvision.transforms import ToTensor, Lambda\n",
    "\n",
    "import cv2\n",
    "import matplotlib.pyplot as plt\n",
    "import wandb\n",
    "\n",
    "class MODEL(nn.Module):\n",
    "    def __init__(self, out_label):\n",
    "        super().__init__()\n",
    "        self.resnet = models.resnet18(pretrained=True)\n",
    "        self.resnet.fc = nn.Linear(self.resnet.fc.in_features,out_label)\n",
    "        \n",
    "    def forward(self, X):\n",
    "        return self.resnet(X)\n",
    "\n",
    "class MyDataset(Dataset):\n",
    "    def __init__(self, annotations, img_dir, mode=None, target_transform=None):\n",
    "        super().__init__()\n",
    "        self.img_labels = annotations\n",
    "        self.img_dir = img_dir\n",
    "        if mode=='train':\n",
    "            preprocess = transforms.Compose([\n",
    "                        transforms.Resize(256),\n",
    "                        transforms.CenterCrop(224),\n",
    "                        transforms.RandomHorizontalFlip(p=.25),\n",
    "                        transforms.RandomVerticalFlip(p=.5),\n",
    "                        transforms.ToTensor(),\n",
    "                        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),])\n",
    "        elif mode=='test':\n",
    "            preprocess = transforms.Compose([\n",
    "                        transforms.Resize(256),\n",
    "                        transforms.CenterCrop(224),\n",
    "                        transforms.ToTensor(),\n",
    "                        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),])\n",
    "        else:\n",
    "            # 'val' (and any other/unspecified mode): deterministic preprocessing,\n",
    "            # no augmentation. Previously mode=None (the default) left `preprocess`\n",
    "            # undefined and raised UnboundLocalError on the assignment below.\n",
    "            preprocess = transforms.Compose([\n",
    "                        transforms.Resize(256),\n",
    "                        transforms.CenterCrop(224),\n",
    "                        transforms.ToTensor(),\n",
    "                        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),])\n",
    "        self.transform = preprocess\n",
    "        self.target_transform = target_transform\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.img_labels)\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        img_path = os.path.join(self.img_dir.iloc[idx])\n",
    "        with Image.open(img_path) as im:\n",
    "            image = im\n",
    "            label = self.img_labels.iloc[idx]\n",
    "            if self.transform:\n",
    "                image = self.transform(image)\n",
    "            if self.target_transform:\n",
    "                label = self.target_transform(label)\n",
    "            return image, label\n",
    "    \n",
    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "88fbc20a",
   "metadata": {},
   "outputs": [],
   "source": [
    "test_data = pd.read_csv('test.csv')\n",
    "all_data = pd.read_csv('train.csv')\n",
    "# Shuffle once, then take disjoint 90/10 slices. Two independent .sample() calls\n",
    "# would let the same rows appear in both train and validation (data leakage).\n",
    "shuffled_data = all_data.sample(frac=1, random_state=42, ignore_index=True)\n",
    "split_idx = int(len(shuffled_data)*0.9)\n",
    "train_data = shuffled_data.iloc[:split_idx].reset_index(drop=True)\n",
    "val_data = shuffled_data.iloc[split_idx:].reset_index(drop=True)\n",
    "classes = all_data['label'].unique().tolist()\n",
    "print(\"all_data:\",all_data.shape,\"test_data shape:\",test_data.shape,\"\\nlabel size:\", len(classes))\n",
    "\n",
    "\n",
    "num_epochs, lr, bs, weight_decay = 50, 0.001, 64, 2e-4\n",
    "NUM_SAVE = 5\n",
    "\n",
    "wandb.init(project=\"kaggle_predict_leaves\",\n",
    "           config={ \"learning_rate\": lr,\n",
    "                    \"batch_size\": bs,\n",
    "                    \"total_run\": num_epochs,\n",
    "                    \"weight decay\":weight_decay,\n",
    "                    \"optim\": \"AdamW\"\n",
    "                  }\n",
    "          )\n",
    "\n",
    "target_transform = Lambda(lambda y: torch.tensor(classes.index(y)))\n",
    "\n",
    "\n",
    "training_data = MyDataset(train_data['label'], train_data['image'], 'train', target_transform)\n",
    "train_dataloader = DataLoader(training_data, batch_size=bs, shuffle=True)\n",
    "\n",
    "# distinct name so the raw `val_data` DataFrame is not shadowed by the Dataset\n",
    "validation_data = MyDataset(val_data['label'], val_data['image'], 'val', target_transform)\n",
    "val_dataloader = DataLoader(validation_data, batch_size=bs, shuffle=False)\n",
    "\n",
    "testing_data = MyDataset(test_data['image'], test_data['image'], 'test', None)\n",
    "test_dataloader = DataLoader(testing_data, batch_size=bs, shuffle=False)\n",
    "print(\"train_data length:\",len(training_data),\"test_data length:\",len(test_data))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "65f3b7af",
   "metadata": {},
   "outputs": [],
   "source": [
    "net = MODEL(out_label = len(classes)).to(device)\n",
    "\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "params_1x = [param for name, param in net.resnet.named_parameters()\n",
    "             if name not in [\"fc.weight\", \"fc.bias\"]]\n",
    "optimizer = optim.AdamW([{'params': params_1x},\n",
    "                       {'params': net.resnet.fc.parameters(),\n",
    "                        'lr': lr * 10}],\n",
    "                        lr=lr, weight_decay=weight_decay)\n",
    "\n",
    "# 4. Training loop\n",
    "wandb.watch(net)\n",
    "step = 0\n",
    "for epoch in tqdm(range(num_epochs)):  # loop over the dataset multiple times\n",
    "    \n",
    "    # Training set\n",
    "    train_accs = []\n",
    "    for i, data in enumerate(train_dataloader,0):\n",
    "        # get the inputs; data is a list of [inputs, labels]\n",
    "        inputs, labels = data\n",
    "        inputs, labels = inputs.to(device), labels.to(device)\n",
    "        # zero the parameter gradients\n",
    "        optimizer.zero_grad()\n",
    "        # forward + backward + optimize\n",
    "        outputs = net(inputs)\n",
    "        loss = criterion(outputs, labels)\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "        # record\n",
    "        step+=1\n",
    "        acc = (outputs.argmax(dim=-1) == labels).float().mean()\n",
    "        train_accs.append(acc)\n",
    "        wandb.log({'loss': loss.item(),'step':step})\n",
    "        del inputs, labels\n",
    "    \n",
    "    train_accuracy = sum(train_accs) / len(train_accs)\n",
    "    wandb.log({'train accuracy': train_accuracy,'epoch': epoch})\n",
    "    \n",
    "    # Validation: eval mode + no_grad so BatchNorm/Dropout behave\n",
    "    # deterministically and no autograd graphs are retained.\n",
    "    net.eval()\n",
    "    val_accs = []\n",
    "    with torch.no_grad():\n",
    "        for i, data in enumerate(val_dataloader,0):\n",
    "            inputs, labels = data\n",
    "            inputs, labels = inputs.to(device), labels.to(device)\n",
    "            outputs = net(inputs)\n",
    "            acc = (outputs.argmax(dim=-1) == labels).float().mean()\n",
    "            val_accs.append(acc)\n",
    "            del inputs, labels\n",
    "    net.train()\n",
    "    val_accuracy = sum(val_accs) / len(val_accs)\n",
    "    wandb.log({'accuracy': val_accuracy,'epoch': epoch})\n",
    "    print(\"No.\",epoch, \"train acc: {:.2f}%  val acc: {:.2f}%\".format(train_accuracy.item()*100, val_accuracy.item()*100))\n",
    "    if (epoch%(num_epochs//NUM_SAVE)==0) and epoch!=0:\n",
    "        torch.save(net.state_dict(),'checkpoint_'+str(epoch))\n",
    "        print(\"Model Saved\")\n",
    "    \n",
    "wandb.finish()\n",
    "print('Finished Training, the last loss is:', loss.item())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0ea3674c",
   "metadata": {},
   "outputs": [],
   "source": [
    "torch.cuda.empty_cache()\n",
    "# Run the trained network on the test set.\n",
    "pre_label=[]\n",
    "net.eval()\n",
    "with torch.no_grad():\n",
    "    # tqdm(iterable) only — the stray `,0` was being passed to tqdm as its\n",
    "    # `desc` argument, not to enumerate as the start index.\n",
    "    for i, data in enumerate(tqdm(test_dataloader), 0):\n",
    "        inputs, labels = data\n",
    "        inputs = inputs.to(device)\n",
    "        outputs = net(inputs)\n",
    "        _, predicted = torch.max(outputs, 1)\n",
    "        for j in range(len(predicted)):\n",
    "            pre_label.append(classes[predicted[j]])\n",
    "\n",
    "# Build the submission file\n",
    "submission = pd.concat([test_data['image'], pd.DataFrame(pre_label,columns =['label'])], axis=1)\n",
    "submission.to_csv('submission.csv', index=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a55f8c4b",
   "metadata": {},
   "source": [
    "# Simple Version"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "45dd9b0f",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import pandas as pd\n",
    "from tqdm.notebook import tqdm\n",
    "import numpy as np\n",
    "import os\n",
    "\n",
    "import torchvision\n",
    "from torchvision import datasets, models, transforms\n",
    "import torch.nn as nn\n",
    "from torch.nn import functional as F\n",
    "from torch.utils import data\n",
    "import torch.optim as optim\n",
    "\n",
    "from torchvision.io import read_image\n",
    "from PIL import Image\n",
    "from torch.utils.data import Dataset,DataLoader\n",
    "from torchvision.transforms import ToTensor, Lambda\n",
    "\n",
    "\n",
    "import matplotlib.pyplot as plt\n",
    "import wandb\n",
    "\n",
    "class MODEL(nn.Module):\n",
    "    def __init__(self, out_label):\n",
    "        super().__init__()\n",
    "        self.resnet = models.resnet18(pretrained=True)\n",
    "        self.resnet.fc = nn.Linear(self.resnet.fc.in_features,out_label)\n",
    "        \n",
    "    def forward(self, X):\n",
    "        return self.resnet(X)\n",
    "\n",
    "class MyDataset(Dataset):\n",
    "    def __init__(self, annotations, img_dir, mode=None, target_transform=None):\n",
    "        super().__init__()\n",
    "        self.img_labels = annotations\n",
    "        self.img_dir = img_dir\n",
    "        preprocess = transforms.Compose([\n",
    "                    transforms.Resize(256),\n",
    "                    transforms.CenterCrop(224),\n",
    "                    transforms.ToTensor(),\n",
    "                    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),])\n",
    "        self.transform = preprocess\n",
    "        self.target_transform = target_transform\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.img_labels)\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        img_path = os.path.join(self.img_dir.iloc[idx])\n",
    "        with Image.open(img_path) as im:\n",
    "            image = im\n",
    "            label = self.img_labels.iloc[idx]\n",
    "            if self.transform:\n",
    "                image = self.transform(image)\n",
    "            if self.target_transform:\n",
    "                label = self.target_transform(label)\n",
    "            return image, label\n",
    "    \n",
    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
    "test_data = pd.read_csv('test.csv')\n",
    "train_data = pd.read_csv('train.csv')\n",
    "classes = train_data['label'].unique().tolist()\n",
    "print(\"train_data:\",train_data.shape,\"test_data shape:\",test_data.shape,\"\\nlabel size:\", len(classes))\n",
    "\n",
    "num_epochs, lr, bs = 30, 0.001, 128\n",
    "NUM_SAVE = 5\n",
    "\n",
    "target_transform = Lambda(lambda y: torch.tensor(classes.index(y)))\n",
    "\n",
    "training_data = MyDataset(train_data['label'], train_data['image'], 'train', target_transform)\n",
    "train_dataloader = DataLoader(training_data, batch_size=bs, shuffle=True)\n",
    "\n",
    "\n",
    "testing_data = MyDataset(test_data['image'], test_data['image'], 'test', None)\n",
    "test_dataloader = DataLoader(testing_data, batch_size=bs, shuffle=False)\n",
    "print(\"train_data length:\",len(training_data),\"test_data length:\",len(test_data))\n",
    "\n",
    "net = MODEL(out_label = len(classes)).to(device)\n",
    "\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9)\n",
    "\n",
    "wandb.init(project=\"kaggle_predict_leaves\",\n",
    "           config={ \"learning_rate\": lr,\n",
    "                    \"batch_size\": bs,\n",
    "                    \"total_run\": num_epochs,\n",
    "                    \"optim\": \"SGD\"\n",
    "                  }\n",
    "          )\n",
    "\n",
    "# 4. Training loop\n",
    "wandb.watch(net)\n",
    "step = 0\n",
    "for epoch in tqdm(range(num_epochs)):  # loop over the dataset multiple times\n",
    "    \n",
    "    # Training set\n",
    "    train_accs = []\n",
    "    for i, data in enumerate(train_dataloader,0):\n",
    "        # get the inputs; data is a list of [inputs, labels]\n",
    "        inputs, labels = data\n",
    "        inputs, labels = inputs.to(device), labels.to(device)\n",
    "        # zero the parameter gradients\n",
    "        optimizer.zero_grad()\n",
    "        # forward + backward + optimize\n",
    "        outputs = net(inputs)\n",
    "        loss = criterion(outputs, labels)\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "        # record\n",
    "        step+=1\n",
    "        acc = (outputs.argmax(dim=-1) == labels).float().mean()\n",
    "        train_accs.append(acc)\n",
    "        wandb.log({'loss': loss.item(),'step':step})\n",
    "        del inputs, labels\n",
    "    \n",
    "    train_accuracy = sum(train_accs) / len(train_accs)\n",
    "    wandb.log({'accuracy': train_accuracy,'epoch': epoch})\n",
    "    print(\"No.\",epoch, \"  Accuracy:\"+\"{:.2f}%\".format(train_accuracy.item()*100))\n",
    "    if (epoch%(num_epochs//NUM_SAVE)==0) and epoch!=0:\n",
    "        torch.save(net.state_dict(),'checkpoint_'+str(epoch))\n",
    "        print(\"Model Saved\")\n",
    "    \n",
    "wandb.finish()\n",
    "print(\"Finished Training, accuracy:\"+\"{:.2f}%\".format(train_accuracy.item()*100))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e25ec13d",
   "metadata": {},
   "outputs": [],
   "source": [
    "torch.cuda.empty_cache()\n",
    "# Run the trained network on the test set (moved to CPU).\n",
    "pre_label=[]\n",
    "net.to('cpu')\n",
    "net.eval()\n",
    "with torch.no_grad():\n",
    "    # tqdm(iterable) only — the stray `,0` was being passed to tqdm as its\n",
    "    # `desc` argument, not to enumerate as the start index.\n",
    "    for i, data in enumerate(tqdm(test_dataloader), 0):\n",
    "        inputs, labels = data\n",
    "        outputs = net(inputs)\n",
    "        _, predicted = torch.max(outputs, 1)\n",
    "        for j in range(len(predicted)):\n",
    "            pre_label.append(classes[predicted[j]])\n",
    "\n",
    "# Build the submission file\n",
    "submission = pd.concat([test_data['image'], pd.DataFrame(pre_label,columns =['label'])], axis=1)\n",
    "submission.to_csv('submission.csv', index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3d43a63c",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "sim_py38",
   "language": "python",
   "name": "sim_py38"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
