{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "3f6a6f53",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import time\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "from torch.optim import lr_scheduler\n",
    "from torch.autograd import Variable\n",
    "import torchvision\n",
    "from torchvision import datasets, models, transforms"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ccd0b023",
   "metadata": {},
   "source": [
    "### Hyper-parameters"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "25ba6004",
   "metadata": {},
   "outputs": [],
   "source": [
     "learning_rate = 0.0001  # initial LR for AdamW (only the fc head is optimized)\n",
     "\n",
     "gamma=0.01  # StepLR multiplicative decay factor (applied every step_size epochs)\n",
     "\n",
     "num_epochs = 15  # number of training epochs\n",
     "\n",
     "weight_decay = 0.01  # AdamW weight decay\n",
     "\n",
     "classes = 5  # number of fruit classes (apple/banana/grape/orange/pear)\n",
     "\n",
     "# not used (would only matter for the commented-out SGD optimizer)\n",
     "momentum = 0.9\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "ede95e84",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'apple': 0, 'banana': 1, 'grape': 2, 'orange': 3, 'pear': 4}\n"
     ]
    }
   ],
   "source": [
    "dataTrans = transforms.Compose([\n",
    "            transforms.Resize(256),\n",
    "            transforms.CenterCrop(224),\n",
    "            transforms.ToTensor(),\n",
    "            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n",
    "])\n",
    "\n",
    "# C:\\Users\\user\\Videos\\yujian\\train\n",
    "\n",
    "#data_dir = r'F:\\games\\a\\data\\aug'\n",
    "train_data_dir = r'D:\\AI\\datasets\\fruits\\train'\n",
    "val_data_dir = r'D:\\AI\\datasets\\fruits\\val'\n",
    "\n",
    "train_dataset = datasets.ImageFolder(train_data_dir, dataTrans)\n",
    "print(train_dataset.class_to_idx)\n",
    "val_dataset = datasets.ImageFolder(val_data_dir, dataTrans)\n",
    "\n",
    "image_datasets = {'train':train_dataset,'val':val_dataset}\n",
    "    \n",
    "\n",
    "    # wrap your data and label into Tensor\n",
    "\n",
    "    \n",
    "dataloders = {x: torch.utils.data.DataLoader(image_datasets[x],\n",
    "                                             batch_size=32,\n",
    "                                             shuffle=True,\n",
    "                                             num_workers=4) for x in ['train', 'val']}\n",
    "\n",
    "dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}\n",
    "\n",
    "    # use gpu or not\n",
    "use_gpu = torch.cuda.is_available()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "564fb01e",
   "metadata": {},
   "source": [
     "### Class-to-index mapping (printed by the cell above)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "a6d79479",
   "metadata": {},
   "outputs": [],
   "source": [
    "def train_model(model, lossfunc, optimizer, scheduler, num_epochs=10):\n",
    "    start_time = time.time()\n",
    "\n",
    "    best_model_wts = model.state_dict()\n",
    "    best_acc = 0.0\n",
    "\n",
    "    for epoch in range(num_epochs):\n",
    "        print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n",
    "        print('-' * 10)\n",
    "\n",
    "        # Each epoch has a training and validation phase\n",
    "        for phase in ['train', 'val']:\n",
    "            if phase == 'train':\n",
    "                # must call  If you use the learning rate scheduler (calling scheduler.step()) \n",
    "                # before the optimizer’s update (calling optimizer.step()), \n",
    "                # this will skip the first value of the learning rate schedule.\n",
    "                model.train(True)  # Set model to training mode\n",
    "            else:\n",
    "                model.train(False)  # Set model to evaluate mode\n",
    "\n",
    "            running_loss = 0.0\n",
    "            running_corrects = 0.0\n",
    "\n",
    "            # Iterate over data.\n",
    "            for data in dataloders[phase]:\n",
    "                # get the inputs\n",
    "                inputs, labels = data\n",
    "                \n",
    "\n",
    "                # wrap them in Variable\n",
    "                if use_gpu:\n",
    "                    inputs = Variable(inputs.cuda())\n",
    "                    labels = Variable(labels.cuda())\n",
    "                    #print('gpu')\n",
    "                else:\n",
    "                    inputs, labels = Variable(inputs), Variable(labels)\n",
    "\n",
    "                # zero the parameter gradients\n",
    "                optimizer.zero_grad()\n",
    "\n",
    "                # forward\n",
    "                outputs = model(inputs)\n",
    "                _, preds = torch.max(outputs.data, 1)\n",
    "                loss = lossfunc(outputs, labels)\n",
    "\n",
    "                # backward + optimize only if in training phase\n",
    "                if phase == 'train':\n",
    "                    loss.backward()\n",
    "                    optimizer.step()\n",
    "\n",
    "                # statistics\n",
    "                running_loss += loss.data\n",
    "                running_corrects += torch.sum(preds == labels.data).to(torch.float32)\n",
    "\n",
    "            if phase == 'train':\n",
    "                scheduler.step()\n",
    "                \n",
    "            epoch_loss = running_loss / dataset_sizes[phase]\n",
    "            epoch_acc = running_corrects / dataset_sizes[phase]\n",
    "\n",
    "            print('{} Loss: {:.4f} Acc: {:.4f}'.format(\n",
    "                phase, epoch_loss, epoch_acc))\n",
    "\n",
    "            # deep copy the model\n",
    "            if phase == 'val' and epoch_acc > best_acc:\n",
    "                best_acc = epoch_acc\n",
    "                best_model_wts = model.state_dict()\n",
    "\n",
    "    elapsed_time = time.time() - start_time\n",
    "    print('Training complete in {:.0f}m {:.0f}s'.format(\n",
    "        elapsed_time // 60, elapsed_time % 60))\n",
    "    print('Best val Acc: {:4f}'.format(best_acc))\n",
    "\n",
    "    # load best model weights\n",
    "    model.load_state_dict(best_model_wts)\n",
    "  \n",
    "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "8046eb5e",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\luoch\\.conda\\envs\\test1\\lib\\site-packages\\torchvision\\models\\_utils.py:208: UserWarning: The parameter 'pretrained' is deprecated since 0.13 and will be removed in 0.15, please use 'weights' instead.\n",
      "  warnings.warn(\n",
      "C:\\Users\\luoch\\.conda\\envs\\test1\\lib\\site-packages\\torchvision\\models\\_utils.py:223: UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and will be removed in 0.15. The current behavior is equivalent to passing `weights=ResNet50_Weights.IMAGENET1K_V1`. You can also use `weights=ResNet50_Weights.DEFAULT` to get the most up-to-date weights.\n",
      "  warnings.warn(msg)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0/14\n",
      "----------\n",
      "train Loss: 0.0426 Acc: 0.5917\n",
      "val Loss: 0.0520 Acc: 0.7143\n",
      "Epoch 1/14\n",
      "----------\n",
      "train Loss: 0.0302 Acc: 0.8881\n",
      "val Loss: 0.0378 Acc: 0.8429\n",
      "Epoch 2/14\n",
      "----------\n",
      "train Loss: 0.0227 Acc: 0.9368\n",
      "val Loss: 0.0298 Acc: 0.9143\n",
      "Epoch 3/14\n",
      "----------\n",
      "train Loss: 0.0179 Acc: 0.9437\n",
      "val Loss: 0.0295 Acc: 0.9143\n",
      "Epoch 4/14\n",
      "----------\n",
      "train Loss: 0.0149 Acc: 0.9570\n",
      "val Loss: 0.0218 Acc: 0.9286\n",
      "Epoch 5/14\n",
      "----------\n",
      "train Loss: 0.0137 Acc: 0.9602\n",
      "val Loss: 0.0199 Acc: 0.9286\n",
      "Epoch 6/14\n",
      "----------\n",
      "train Loss: 0.0136 Acc: 0.9627\n",
      "val Loss: 0.0184 Acc: 0.9429\n",
      "Epoch 7/14\n",
      "----------\n",
      "train Loss: 0.0133 Acc: 0.9576\n",
      "val Loss: 0.0198 Acc: 0.9143\n",
      "Epoch 8/14\n",
      "----------\n",
      "train Loss: 0.0136 Acc: 0.9520\n",
      "val Loss: 0.0188 Acc: 0.9429\n",
      "Epoch 9/14\n",
      "----------\n",
      "train Loss: 0.0135 Acc: 0.9602\n",
      "val Loss: 0.0228 Acc: 0.9286\n",
      "Epoch 10/14\n",
      "----------\n",
      "train Loss: 0.0135 Acc: 0.9589\n",
      "val Loss: 0.0223 Acc: 0.9286\n",
      "Epoch 11/14\n",
      "----------\n",
      "train Loss: 0.0136 Acc: 0.9576\n",
      "val Loss: 0.0235 Acc: 0.9286\n",
      "Epoch 12/14\n",
      "----------\n",
      "train Loss: 0.0137 Acc: 0.9558\n",
      "val Loss: 0.0193 Acc: 0.9571\n",
      "Epoch 13/14\n",
      "----------\n",
      "train Loss: 0.0133 Acc: 0.9621\n",
      "val Loss: 0.0189 Acc: 0.9286\n",
      "Epoch 14/14\n",
      "----------\n",
      "train Loss: 0.0134 Acc: 0.9621\n",
      "val Loss: 0.0216 Acc: 0.9286\n",
      "Training complete in 3m 47s\n",
      "Best val Acc: 0.957143\n"
     ]
    }
   ],
   "source": [
    "# get model and replace the original fc layer with your fc layer\n",
    "model_ft = models.resnet50(pretrained=True, progress=False)\n",
    "num_ftrs = model_ft.fc.in_features\n",
    "model_ft.fc = nn.Linear(num_ftrs, len(train_dataset.classes))\n",
    "\n",
    "if use_gpu:\n",
    "    model_ft = model_ft.cuda()\n",
    "\n",
    "    # define loss function\n",
    "lossfunc = nn.CrossEntropyLoss()\n",
    "\n",
    "    # setting optimizer and trainable parameters\n",
    " #   params = model_ft.parameters()\n",
    " # list(model_ft.fc.parameters())+list(model_ft.layer4.parameters())\n",
    "#params = list(model_ft.fc.parameters())+list( model_ft.parameters())\n",
    "params = list(model_ft.fc.parameters())\n",
    "optimizer_ft = optim.AdamW(params, lr=learning_rate, weight_decay=weight_decay)\n",
    "\n",
    "\n",
    "\n",
    "# optimizer_ft = optim.SGD(params, lr=learning_rate, momentum=0.9)\n",
    "\n",
    "# Decay LR by a factor of 0.1 every 5 epochs\n",
    "exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=5, gamma=gamma)\n",
    "\n",
    "model_ft = train_model(model=model_ft,\n",
    "                           lossfunc=lossfunc,\n",
    "                           optimizer=optimizer_ft,\n",
    "                           scheduler=exp_lr_scheduler,\n",
    "                           num_epochs=num_epochs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "eea78985",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Save weights only; legacy (non-zip) format kept for older torch.load compatibility.\n",
     "torch.save(model_ft.state_dict(), r'D:\\AI\\datasets\\fruits\\model\\model.pth', _use_new_zipfile_serialization=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "32bb3c78",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[-1.5744329690933228, 1.2239397764205933, -1.2153080701828003, -0.9994420409202576, -1.5369632244110107]\n",
      "1.2239397764205933\n",
      "[0.046, 0.758, 0.066, 0.082, 0.048]\n",
      "{\n",
      "    \"predicted_label\": \"banana\",\n",
      "    \"scores\": {\n",
      "        \"apple\": 0.046,\n",
      "        \"banana\": 0.758,\n",
      "        \"grape\": 0.066,\n",
      "        \"orange\": 0.082,\n",
      "        \"pear\": 0.048\n",
      "    }\n",
      "}\n"
     ]
    }
   ],
   "source": [
     "# Inference-time dependencies and constants.\n",
     "from math import exp\n",
     "import numpy as np  # NOTE(review): unused in this notebook\n",
     "\n",
     "from PIL import Image\n",
     "import cv2  # NOTE(review): unused in this notebook\n",
     "\n",
     "\n",
     "# Same preprocessing as training -- inference inputs must match.\n",
     "infer_transformation = transforms.Compose([\n",
     "    transforms.Resize(256),\n",
     "    transforms.CenterCrop(224),\n",
     "    transforms.ToTensor(),\n",
     "    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n",
     "])\n",
     "\n",
     "\n",
     "# Keys of the prediction result dict; IMAGES_KEY, MODEL_INPUT_KEY and\n",
     "# LABELS_FILE_NAME are not referenced anywhere in this notebook.\n",
     "IMAGES_KEY = 'images'\n",
     "MODEL_INPUT_KEY = 'images'\n",
     "LABEL_OUTPUT_KEY = 'predicted_label'\n",
     "MODEL_OUTPUT_KEY = 'scores'\n",
     "LABELS_FILE_NAME = 'labels.txt'\n",
    "\n",
    "def decode_image(file_content):\n",
    "    image = Image.open(file_content)\n",
    "    image = image.convert('RGB')\n",
    "    return image\n",
    "\n",
    " \n",
    "def read_label_list(path):\n",
    "    with open(path, 'r',encoding=\"utf8\") as f:\n",
    "        label_list = f.read().split(os.linesep)\n",
    "    label_list = [x.strip() for x in label_list if x.strip()]\n",
    "    return label_list\n",
    "\n",
    "\n",
    "def resnetRun(model_path):\n",
    "\n",
    "    \"\"\"Constructs a ResNet model.\n",
    "    Args:\n",
    "        pretrained (bool): If True, returns a model pre-trained on ImageNet\n",
    "    \"\"\"\n",
    "    model = models.resnet50(pretrained=False)\n",
    "    num_ftrs = model.fc.in_features\n",
    "    model.fc = nn.Linear(num_ftrs, classes)\n",
    "    model.load_state_dict(torch.load(model_path,map_location ='cpu'))\n",
    "    # model.load_state_dict(torch.load(model_path))\n",
    "\n",
    "    model.eval()\n",
    "\n",
    "    return model\n",
    "\n",
    "\n",
    "def predict(file_name):\n",
    "    #LABEL_LIST = read_label_list('./model/labels.txt')\n",
    "    model = resnetRun(r'D:\\AI\\datasets\\fruits\\model\\model.pth')\n",
    "    \n",
    "    image1 = decode_image(file_name)\n",
    "    \n",
    "\n",
    "    input_img = infer_transformation(image1)\n",
    "\n",
    "    input_img = torch.autograd.Variable(torch.unsqueeze(input_img, dim=0).float(), requires_grad=False)\n",
    "\n",
    "    logits_list =  model(input_img)[0].detach().numpy().tolist()\n",
    "    print(logits_list)\n",
    "    maxlist=max(logits_list)\n",
    "    print(maxlist)\n",
    "\n",
    "    z_exp = [exp(i-maxlist) for i in  logits_list]\n",
    "    \n",
    "    LABEL_LIST = [\"apple\",\"banana\",\"grape\",\"orange\",\"pear\"]\n",
    "\n",
    "    sum_z_exp = sum(z_exp)\n",
    "    softmax = [round(i / sum_z_exp, 3) for i in z_exp]\n",
    "    print(softmax)\n",
    "    labels_to_logits = {\n",
    "        LABEL_LIST[i]: s for i, s in enumerate(softmax)\n",
    "    }\n",
    "    \n",
    "    predict_result = {\n",
    "        LABEL_OUTPUT_KEY: max(labels_to_logits, key=labels_to_logits.get),\n",
    "        MODEL_OUTPUT_KEY: labels_to_logits\n",
    "    }\n",
    "\n",
    "    return predict_result\n",
    "\n",
    "file_name = r'D:\\AI\\datasets\\fruits\\test\\banana\\0322.jpg'\n",
    "result = predict(file_name)  #可以替换其他图片\n",
    "# import matplotlib.pyplot as plt\n",
    "\n",
    "# plt.figure(figsize=(10,10)) #设置窗口大小\n",
    "# img = decode_image(file_name)\n",
    "# plt.imshow(img)\n",
    "# plt.show()\n",
    "\n",
    "#print(result)\n",
    "\n",
    "import json\n",
    "\n",
    "json_str=json.dumps(result, indent=4)\n",
    "\n",
    "print(json_str)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "391faa39",
   "metadata": {},
   "outputs": [],
   "source": [
     "# TODO: also evaluate a ResNet-152 backbone"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4f237abd",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Used AdamW"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8b0d902a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# need to pretrain"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "00b12f15",
   "metadata": {},
   "outputs": [],
   "source": [
     "# TODO: data augmentation not implemented yet"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "629239a3",
   "metadata": {},
   "outputs": [],
   "source": [
    "# change lr"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
