{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "328ea76f-a8d7-4f80-b6e4-66b257d5fa86",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/local/lib/python3.10/dist-packages/torchvision/models/_utils.py:208: UserWarning: The parameter 'pretrained' is deprecated since 0.13 and may be removed in the future, please use 'weights' instead.\n",
      "  warnings.warn(\n",
      "/usr/local/lib/python3.10/dist-packages/torchvision/models/_utils.py:223: UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and may be removed in the future. The current behavior is equivalent to passing `weights=VGG16_Weights.IMAGENET1K_V1`. You can also use `weights=VGG16_Weights.DEFAULT` to get the most up-to-date weights.\n",
      "  warnings.warn(msg)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/10, Loss: 2.2442\n",
      "Validation Loss: 1.7119\n",
      "Epoch 2/10, Loss: 1.5354\n",
      "Validation Loss: 1.5287\n",
      "Epoch 3/10, Loss: 1.3427\n",
      "Validation Loss: 1.2756\n",
      "Epoch 4/10, Loss: 1.3136\n",
      "Validation Loss: 1.0305\n",
      "Epoch 5/10, Loss: 1.4613\n",
      "Validation Loss: 1.4009\n",
      "Epoch 6/10, Loss: 1.4121\n",
      "Validation Loss: 1.3300\n",
      "Epoch 7/10, Loss: 1.3436\n",
      "Validation Loss: 1.2015\n",
      "Epoch 8/10, Loss: 1.2019\n",
      "Validation Loss: 0.9081\n",
      "Epoch 9/10, Loss: 1.0146\n",
      "Validation Loss: 0.9550\n",
      "Epoch 10/10, Loss: 1.0330\n",
      "Validation Loss: 1.0998\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "from torchvision import models, transforms\n",
    "from torch.utils.data import DataLoader\n",
    "from torchvision.datasets import ImageFolder\n",
    "\n",
    "# 检查是否有可用的GPU\n",
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "\n",
    "# 加载预训练的VGG16模型\n",
    "base_model = models.vgg16(pretrained=True)\n",
    "base_model.features.requires_grad = False\n",
    "\n",
    "# 修改分类器以匹配类别数量\n",
    "num_classes = 6\n",
    "base_model.classifier[6] = nn.Linear(base_model.classifier[6].in_features, num_classes)\n",
    "\n",
    "# 将模型移动到设备\n",
    "base_model = base_model.to(device)\n",
    "\n",
    "# 定义数据转换\n",
    "data_transforms = {\n",
    "    'train': transforms.Compose([\n",
    "        transforms.RandomResizedCrop(224),\n",
    "        transforms.RandomHorizontalFlip(),\n",
    "        transforms.ToTensor(),\n",
    "        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n",
    "    ]),\n",
    "    'valid': transforms.Compose([\n",
    "        transforms.Resize(256),\n",
    "        transforms.CenterCrop(224),\n",
    "        transforms.ToTensor(),\n",
    "        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n",
    "    ]),\n",
    "}\n",
    "\n",
    "# 加载数据集\n",
    "train_dataset = ImageFolder('data/fruits/train', transform=data_transforms['train'])\n",
    "valid_dataset = ImageFolder('data/fruits/valid', transform=data_transforms['valid'])\n",
    "\n",
    "# 创建数据加载器\n",
    "train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)\n",
    "valid_loader = DataLoader(valid_dataset, batch_size=32, shuffle=False)\n",
    "\n",
    "# 定义损失函数和优化器\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "optimizer = optim.Adam(base_model.parameters(), lr=0.001)\n",
    "\n",
    "# 训练模型\n",
    "num_epochs = 10\n",
    "for epoch in range(num_epochs):\n",
    "    base_model.train()\n",
    "    running_loss = 0.0\n",
    "    for inputs, labels in train_loader:\n",
    "        inputs, labels = inputs.to(device), labels.to(device)  # 将数据移动到设备\n",
    "        optimizer.zero_grad()\n",
    "        outputs = base_model(inputs)\n",
    "        loss = criterion(outputs, labels)\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "        running_loss += loss.item() * inputs.size(0)\n",
    "    \n",
    "    epoch_loss = running_loss / len(train_loader.dataset)\n",
    "    print(f'Epoch {epoch+1}/{num_epochs}, Loss: {epoch_loss:.4f}')\n",
    "\n",
    "    base_model.eval()\n",
    "    val_running_loss = 0.0\n",
    "    for inputs, labels in valid_loader:\n",
    "        inputs, labels = inputs.to(device), labels.to(device)  # 将数据移动到设备\n",
    "        outputs = base_model(inputs)\n",
    "        loss = criterion(outputs, labels)\n",
    "        val_running_loss += loss.item() * inputs.size(0)\n",
    "    \n",
    "    val_loss = val_running_loss / len(valid_loader.dataset)\n",
    "    print(f'Validation Loss: {val_loss:.4f}')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "0def5512-78b7-4661-927a-746f1ca51b87",
   "metadata": {},
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'utils' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[3], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mutils\u001b[49m\u001b[38;5;241m.\u001b[39mvalidate(my_model, valid_loader, valid_N, loss_function)\n",
      "\u001b[0;31mNameError\u001b[0m: name 'utils' is not defined"
     ]
    }
   ],
   "source": [
     "# NOTE(review): this cell raises NameError as shown in its output --\n",
     "# `utils` (and `my_model`, `valid_N`, `loss_function`) are not defined at\n",
     "# this point; `import utils` only happens in a later cell. Reorder or\n",
     "# remove this cell so that Restart Kernel & Run All succeeds.\n",
     "utils.validate(my_model, valid_loader, valid_N, loss_function)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "f9db9498-c27b-4507-85c4-7e8677b09a80",
   "metadata": {},
   "outputs": [],
   "source": [
    "from run_assessment import run_assessment"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "6817ef7d-9e84-4bc6-bed7-b3c291a904b1",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "True"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "import torch\n",
     "import torch.nn as nn\n",
     "from torch.optim import Adam\n",
     "from torch.utils.data import Dataset, DataLoader\n",
     "import torchvision.transforms.v2 as transforms\n",
     "import torchvision.io as tv_io\n",
     "\n",
     "import glob\n",
     "from PIL import Image\n",
     "\n",
     "import utils\n",
     "\n",
     "# Pick the GPU when present; the bare expression on the last line is the\n",
     "# cell's displayed output (True in the recorded run, i.e. CUDA available).\n",
     "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
     "torch.cuda.is_available()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "417330c2-9f9f-4c24-a782-5691de366cae",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Evaluating model to obtain average accuracy...\n",
      "\n",
      "Accuracy: 0.5502\n",
      "\n",
      "Accuracy required to pass the assessment is 0.92 or greater.\n",
      "Your average accuracy is 0.5502.\n",
      "\n",
      "Your accuracy is not yet high enough to pass the assessment, please continue trying.\n"
     ]
    }
   ],
   "source": [
    "# run_assessment(my_model)\n",
    "run_assessment(base_model)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "38e42b5c-2521-4ebb-b197-70438e23c9b7",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
