{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "7caf33a9-a51d-43bf-8928-f66e3ca78dc8",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "mchar_train.zip \t http://tianchi-competition.oss-cn-hangzhou.aliyuncs.com/531795/mchar_train.zip\n",
      "mchar_train.json \t http://tianchi-competition.oss-cn-hangzhou.aliyuncs.com/531795/mchar_train.json\n",
      "mchar_val.zip \t http://tianchi-competition.oss-cn-hangzhou.aliyuncs.com/531795/mchar_val.zip\n",
      "mchar_val.json \t http://tianchi-competition.oss-cn-hangzhou.aliyuncs.com/531795/mchar_val.json\n",
      "mchar_test_a.zip \t http://tianchi-competition.oss-cn-hangzhou.aliyuncs.com/531795/mchar_test_a.zip\n",
      "mchar_sample_submit_A.csv \t http://tianchi-competition.oss-cn-hangzhou.aliyuncs.com/531795/mchar_sample_submit_A.csv\n"
     ]
    }
   ],
   "source": [
    "# Download the competition data files listed in mchar_data_list_0515.csv.\n",
    "# NOTE(review): mypath + dir_name concatenates to './contentNDataset' (no '/');\n",
    "# kept as-is to preserve the existing on-disk layout — confirm this is intended.\n",
    "import pandas as pd\n",
    "import os\n",
    "import requests\n",
    "import zipfile\n",
    "import shutil\n",
    "links = pd.read_csv('./mchar_data_list_0515.csv')\n",
    "dir_name = 'NDataset'\n",
    "mypath = './content'\n",
    "if not os.path.exists(mypath + dir_name):\n",
    "    os.makedirs(mypath + dir_name)\n",
    "for i, link in enumerate(links['link']):\n",
    "    file_name = links['file'][i]\n",
    "    print(file_name, '\\t', link)\n",
    "    file_name = mypath + dir_name + '/' + file_name\n",
    "    # Skip files already fetched on a previous run.\n",
    "    if not os.path.exists(file_name):\n",
    "        # Stream to disk in chunks so large archives never sit fully in memory;\n",
    "        # the context manager releases the connection when done.\n",
    "        with requests.get(link, stream=True) as response:\n",
    "            response.raise_for_status()  # fail loudly instead of saving an HTML error page\n",
    "            with open(file_name, 'wb') as f:\n",
    "                for chunk in response.iter_content(chunk_size=1024):\n",
    "                    if chunk:\n",
    "                        f.write(chunk)\n",
    "# Remove the macOS resource-fork folder shipped inside the zip archives.\n",
    "if os.path.exists(mypath + dir_name + '/' + '__MACOSX'):\n",
    "    shutil.rmtree(mypath + dir_name + '/' + '__MACOSX')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "23239128-6c0f-4f3e-833c-9bfa4d7ee11a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "%pylab is deprecated, use %matplotlib inline and import the required libraries.\n",
      "Populating the interactive namespace from numpy and matplotlib\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\HRAppStoreDownload\\anaconda3\\Lib\\site-packages\\IPython\\core\\magics\\pylab.py:162: UserWarning: pylab import has clobbered these variables: ['f', 'random']\n",
      "`%matplotlib` prevents importing * from pylab and numpy\n",
      "  warn(\"pylab import has clobbered these variables: %s\"  % clobbered +\n"
     ]
    }
   ],
   "source": [
    "# Core imports for the SVHN street-view character recognition pipeline.\n",
    "import os, sys, glob, shutil, json\n",
    "os.environ[\"CUDA_VISIBLE_DEVICES\"] = '0'\n",
    "import cv2\n",
    "\n",
    "from PIL import Image\n",
    "import numpy as np\n",
    "\n",
    "from tqdm import tqdm, tqdm_notebook\n",
    "from torchvision.models import resnet18, ResNet18_Weights\n",
    "\n",
    "# %pylab is deprecated (the cell's own warning showed it clobbering `f` and\n",
    "# `random`); use the explicit matplotlib setup instead.\n",
    "%matplotlib inline\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "import torch\n",
    "torch.manual_seed(0)  # reproducible weight init / shuffling\n",
    "torch.backends.cudnn.deterministic = False\n",
    "torch.backends.cudnn.benchmark = True  # autotune conv kernels for fixed input sizes\n",
    "\n",
    "import torchvision.models as models\n",
    "import torchvision.transforms as transforms\n",
    "import torchvision.datasets as datasets\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "from torch.autograd import Variable\n",
    "from torch.utils.data.dataset import Dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "f7afb343-5b26-4a8d-a5b5-6a2584878276",
   "metadata": {},
   "outputs": [],
   "source": [
    "class SVHNDataset(Dataset):\n",
    "    def __init__(self, img_path, img_label, transform=None):\n",
    "        self.img_path = img_path\n",
    "        self.img_label = img_label \n",
    "        if transform is not None:\n",
    "            self.transform = transform\n",
    "        else:\n",
    "            self.transform = None\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        img = Image.open(self.img_path[index]).convert('RGB')\n",
    "\n",
    "        if self.transform is not None:\n",
    "            img = self.transform(img)\n",
    "            \n",
    "        #lbl = np.array(self.img_label[index], dtype=np.int)\n",
    "        lbl = np.array(self.img_label[index], dtype=int)\n",
    "        lbl = list(lbl)  + (5 - len(lbl)) * [10]\n",
    "        return img, torch.from_numpy(np.array(lbl[:5]))\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.img_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "01658e61-efbf-4d28-803b-9c042fd8620a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "30000 30000\n",
      "10000 10000\n"
     ]
    }
   ],
   "source": [
    "# Build the train/val datasets and loaders.\n",
    "train_path = glob.glob('./input/train/*.png')\n",
    "train_path.sort()\n",
    "with open('./input/train.json') as fp:  # context manager: no leaked file handle\n",
    "    train_json = json.load(fp)\n",
    "train_label = [train_json[x]['label'] for x in train_json]\n",
    "print(len(train_path), len(train_label))\n",
    "\n",
    "train_loader = torch.utils.data.DataLoader(\n",
    "    SVHNDataset(train_path, train_label,\n",
    "                transforms.Compose([\n",
    "                    transforms.Resize((64, 128)),\n",
    "                    transforms.RandomCrop((60, 120)),\n",
    "                    # NOTE: no RandomHorizontalFlip here — mirroring digit images\n",
    "                    # produces glyphs and digit orderings that no longer match\n",
    "                    # the labels, so it actively hurts this task.\n",
    "                    transforms.ColorJitter(0.3, 0.3, 0.2),\n",
    "                    transforms.RandomRotation(13),\n",
    "                    transforms.ToTensor(),\n",
    "    ])), \n",
    "    batch_size=40, \n",
    "    shuffle=True, \n",
    "    num_workers=0,\n",
    ")\n",
    "\n",
    "val_path = glob.glob('./input/val/*.png')\n",
    "val_path.sort()\n",
    "with open('./input/val.json') as fp:\n",
    "    val_json = json.load(fp)\n",
    "val_label = [val_json[x]['label'] for x in val_json]\n",
    "print(len(val_path), len(val_label))\n",
    "\n",
    "val_loader = torch.utils.data.DataLoader(\n",
    "    SVHNDataset(val_path, val_label,\n",
    "                transforms.Compose([\n",
    "                    transforms.Resize((60, 120)),\n",
    "                    # Validation must be deterministic — the old random\n",
    "                    # ColorJitter made val loss/accuracy vary between runs.\n",
    "                    transforms.ToTensor(),\n",
    "    ])), \n",
    "    batch_size=40, \n",
    "    shuffle=False, \n",
    "    num_workers=0,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "467ef776-2557-4a83-9713-0caae79fdb09",
   "metadata": {},
   "outputs": [],
   "source": [
    "class SVHN_Model1(nn.Module):\n",
    "    \"\"\"ResNet-18 backbone with five parallel 11-way classifier heads,\n",
    "    one per character slot (digits 0-9 plus the \"no digit\" class 10).\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        super(SVHN_Model1, self).__init__()\n",
    "\n",
    "        # Pretrained ResNet-18 with the final FC stripped off; the adaptive\n",
    "        # pool means any input resolution collapses to a 512-d feature.\n",
    "        backbone = resnet18(weights=ResNet18_Weights.DEFAULT)\n",
    "        backbone.avgpool = nn.AdaptiveAvgPool2d(1)\n",
    "        self.cnn = nn.Sequential(*list(backbone.children())[:-1])\n",
    "\n",
    "        # One independent linear head per character position.\n",
    "        # (Attribute names fc1..fc5 are part of the checkpoint state_dict.)\n",
    "        self.fc1 = nn.Linear(512, 11)\n",
    "        self.fc2 = nn.Linear(512, 11)\n",
    "        self.fc3 = nn.Linear(512, 11)\n",
    "        self.fc4 = nn.Linear(512, 11)\n",
    "        self.fc5 = nn.Linear(512, 11)\n",
    "\n",
    "    def forward(self, img):\n",
    "        feat = self.cnn(img).flatten(1)  # (batch, 512)\n",
    "        return (self.fc1(feat), self.fc2(feat), self.fc3(feat),\n",
    "                self.fc4(feat), self.fc5(feat))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "id": "3a729f8e-c663-4fe5-a26d-37860df9d3a3",
   "metadata": {},
   "outputs": [],
   "source": [
    "def train(train_loader, model, criterion, optimizer, epoch):\n",
    "    \"\"\"Run one training epoch and return the mean per-batch loss.\n",
    "\n",
    "    Relies on the notebook-global `use_cuda` flag; `epoch` is accepted for\n",
    "    interface symmetry but not used inside.\n",
    "    \"\"\"\n",
    "    model.train()\n",
    "    train_loss = []\n",
    "\n",
    "    # `images` rather than `input` — avoids shadowing the builtin.\n",
    "    for i, (images, target) in enumerate(train_loader):\n",
    "        if use_cuda:\n",
    "            images = images.cuda()\n",
    "            target = target.cuda()\n",
    "\n",
    "        # Total loss = sum of the five per-position cross-entropies.\n",
    "        c0, c1, c2, c3, c4 = model(images)\n",
    "        loss = criterion(c0, target[:, 0].long()) + \\\n",
    "                criterion(c1, target[:, 1].long()) + \\\n",
    "                criterion(c2, target[:, 2].long()) + \\\n",
    "                criterion(c3, target[:, 3].long()) + \\\n",
    "                criterion(c4, target[:, 4].long())\n",
    "\n",
    "        optimizer.zero_grad()\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "        train_loss.append(loss.item())\n",
    "    return np.mean(train_loss)\n",
    "\n",
    "def validate(val_loader, model, criterion):\n",
    "    \"\"\"Return the mean validation loss (no gradients recorded).\"\"\"\n",
    "    model.eval()\n",
    "    val_loss = []\n",
    "\n",
    "    with torch.no_grad():\n",
    "        for i, (images, target) in enumerate(val_loader):\n",
    "            if use_cuda:\n",
    "                images = images.cuda()\n",
    "                target = target.cuda()\n",
    "\n",
    "            c0, c1, c2, c3, c4 = model(images)\n",
    "            loss = criterion(c0, target[:, 0].long()) + \\\n",
    "                    criterion(c1, target[:, 1].long()) + \\\n",
    "                    criterion(c2, target[:, 2].long()) + \\\n",
    "                    criterion(c3, target[:, 3].long()) + \\\n",
    "                    criterion(c4, target[:, 4].long())\n",
    "\n",
    "            val_loss.append(loss.item())\n",
    "    return np.mean(val_loss)\n",
    "\n",
    "def predict(test_loader, model, tta=10):\n",
    "    \"\"\"Predict logits for every sample, summed over `tta` augmented passes.\n",
    "\n",
    "    Returns an (n_samples, 55) array: five 11-way logit groups per row.\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    test_pred_tta = None\n",
    "\n",
    "    # TTA: each pass re-draws the loader's random transforms; logits are summed.\n",
    "    for _ in range(tta):\n",
    "        test_pred = []\n",
    "\n",
    "        with torch.no_grad():\n",
    "            for i, (images, target) in enumerate(test_loader):\n",
    "                if use_cuda:\n",
    "                    images = images.cuda()\n",
    "\n",
    "                c0, c1, c2, c3, c4 = model(images)\n",
    "                # .cpu() is a no-op on CPU tensors, so one path covers both\n",
    "                # devices; the old code duplicated this whole block per device\n",
    "                # and went through the deprecated .data attribute.\n",
    "                output = np.concatenate([\n",
    "                    c0.cpu().numpy(),\n",
    "                    c1.cpu().numpy(),\n",
    "                    c2.cpu().numpy(),\n",
    "                    c3.cpu().numpy(),\n",
    "                    c4.cpu().numpy()], axis=1)\n",
    "\n",
    "                test_pred.append(output)\n",
    "\n",
    "        test_pred = np.vstack(test_pred)\n",
    "        if test_pred_tta is None:\n",
    "            test_pred_tta = test_pred\n",
    "        else:\n",
    "            test_pred_tta += test_pred\n",
    "\n",
    "    return test_pred_tta"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "14e4dda0-6c08-43bc-953a-67a2f1cc1234",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Current learning rate: 0.001\n",
      "Epoch: 0, Train loss: 4.569718948364258 \t Val loss: 4.7284059257507325\n",
      "Val Acc 0.1642\n",
      "Current learning rate: 0.001\n",
      "Epoch: 1, Train loss: 3.12216930214564 \t Val loss: 3.7478426427841187\n",
      "Val Acc 0.2999\n",
      "Current learning rate: 0.001\n",
      "Epoch: 2, Train loss: 2.5572368524869282 \t Val loss: 3.339671335220337\n",
      "Val Acc 0.3841\n",
      "Current learning rate: 0.001\n",
      "Epoch: 3, Train loss: 2.2585594571431478 \t Val loss: 3.1910605363845823\n",
      "Val Acc 0.4052\n",
      "Current learning rate: 0.001\n",
      "Epoch: 4, Train loss: 2.0426019575595857 \t Val loss: 2.926067464351654\n",
      "Val Acc 0.4594\n",
      "Current learning rate: 0.001\n",
      "Epoch: 5, Train loss: 1.9183137714862823 \t Val loss: 2.756058035373688\n",
      "Val Acc 0.4725\n",
      "Current learning rate: 0.001\n",
      "Epoch: 6, Train loss: 1.8026361051400503 \t Val loss: 2.867757525920868\n",
      "Val Acc 0.459\n",
      "Current learning rate: 0.001\n",
      "Epoch: 7, Train loss: 1.7077351485093435 \t Val loss: 2.778255380630493\n",
      "Val Acc 0.488\n",
      "Current learning rate: 0.001\n",
      "Epoch: 8, Train loss: 1.6393487945397696 \t Val loss: 3.0662317276000977\n",
      "Val Acc 0.4446\n",
      "Current learning rate: 0.001\n",
      "Epoch: 9, Train loss: 1.5567691639264425 \t Val loss: 2.8090655131340028\n",
      "Val Acc 0.4947\n",
      "Current learning rate: 0.001\n",
      "Epoch: 10, Train loss: 1.4989316596190134 \t Val loss: 2.7042841906547546\n",
      "Val Acc 0.5\n",
      "Current learning rate: 0.001\n",
      "Epoch: 11, Train loss: 1.4439502333799998 \t Val loss: 2.705063935995102\n",
      "Val Acc 0.506\n",
      "Current learning rate: 0.001\n",
      "Epoch: 12, Train loss: 1.4063695512612662 \t Val loss: 2.738951578140259\n",
      "Val Acc 0.5045\n",
      "Current learning rate: 0.001\n",
      "Epoch: 13, Train loss: 1.340961026151975 \t Val loss: 2.5907965774536135\n",
      "Val Acc 0.5185\n",
      "Current learning rate: 0.001\n",
      "Epoch: 14, Train loss: 1.3095652418931325 \t Val loss: 2.7273194417953492\n",
      "Val Acc 0.5132\n",
      "Current learning rate: 0.001\n",
      "Epoch: 15, Train loss: 1.2717488454580308 \t Val loss: 2.7166534655094146\n",
      "Val Acc 0.5095\n",
      "Current learning rate: 0.001\n",
      "Epoch: 16, Train loss: 1.240434004386266 \t Val loss: 2.9198019075393677\n",
      "Val Acc 0.4955\n",
      "Current learning rate: 0.001\n",
      "Epoch: 17, Train loss: 1.1896231651703517 \t Val loss: 2.733821882724762\n",
      "Val Acc 0.5068\n",
      "Current learning rate: 0.001\n",
      "Epoch: 18, Train loss: 1.1622348064581554 \t Val loss: 2.480608167171478\n",
      "Val Acc 0.5574\n",
      "Current learning rate: 0.001\n",
      "Epoch: 19, Train loss: 1.1292029850880305 \t Val loss: 2.579346538066864\n",
      "Val Acc 0.5409\n",
      "Current learning rate: 0.001\n",
      "Epoch: 20, Train loss: 1.1028344206412632 \t Val loss: 2.598524364233017\n",
      "Val Acc 0.543\n",
      "Current learning rate: 0.001\n",
      "Epoch: 21, Train loss: 1.0671372361580531 \t Val loss: 2.530459899187088\n",
      "Val Acc 0.563\n",
      "Current learning rate: 0.001\n",
      "Epoch: 22, Train loss: 1.0309440099398295 \t Val loss: 2.7679462394714354\n",
      "Val Acc 0.5153\n",
      "Current learning rate: 0.001\n",
      "Epoch: 23, Train loss: 1.0266354566812514 \t Val loss: 2.7368176970481874\n",
      "Val Acc 0.5299\n",
      "Current learning rate: 0.0001\n",
      "Epoch: 24, Train loss: 0.9890874446630478 \t Val loss: 2.619717338562012\n",
      "Val Acc 0.5596\n",
      "Current learning rate: 0.0001\n",
      "Epoch: 25, Train loss: 0.7700549345612526 \t Val loss: 2.495963944911957\n",
      "Val Acc 0.5859\n",
      "Current learning rate: 0.0001\n",
      "Epoch: 26, Train loss: 0.6943427686790625 \t Val loss: 2.5733668308258055\n",
      "Val Acc 0.582\n",
      "Current learning rate: 0.0001\n",
      "Epoch: 27, Train loss: 0.6713545689384143 \t Val loss: 2.5400693860054018\n",
      "Val Acc 0.5876\n",
      "Current learning rate: 0.0001\n",
      "Epoch: 28, Train loss: 0.6369257508913676 \t Val loss: 2.550480754613876\n",
      "Val Acc 0.5852\n",
      "Current learning rate: 0.0001\n",
      "Epoch: 29, Train loss: 0.6119646582454443 \t Val loss: 2.5692751095294954\n",
      "Val Acc 0.5845\n",
      "Current learning rate: 1e-05\n",
      "Epoch: 30, Train loss: 0.5865826741258303 \t Val loss: 2.584341946601868\n",
      "Val Acc 0.5909\n",
      "Current learning rate: 1e-05\n",
      "Epoch: 31, Train loss: 0.566372638642788 \t Val loss: 2.5642447209358217\n",
      "Val Acc 0.5917\n",
      "Current learning rate: 1e-05\n",
      "Epoch: 32, Train loss: 0.5549380306800207 \t Val loss: 2.5650534286499025\n",
      "Val Acc 0.5908\n",
      "Current learning rate: 1e-05\n",
      "Epoch: 33, Train loss: 0.5522435137132804 \t Val loss: 2.5786803114414214\n",
      "Val Acc 0.5927\n",
      "Current learning rate: 1e-05\n",
      "Epoch: 34, Train loss: 0.5543490079542001 \t Val loss: 2.5893959515094758\n",
      "Val Acc 0.5906\n",
      "Current learning rate: 1e-05\n",
      "Epoch: 35, Train loss: 0.5503239490191142 \t Val loss: 2.5933775806427004\n",
      "Val Acc 0.5897\n",
      "Current learning rate: 1.0000000000000002e-06\n",
      "Epoch: 36, Train loss: 0.5467462799201409 \t Val loss: 2.595342047691345\n",
      "Val Acc 0.5921\n",
      "Current learning rate: 1.0000000000000002e-06\n",
      "Epoch: 37, Train loss: 0.5394443402489026 \t Val loss: 2.569867856025696\n",
      "Val Acc 0.5901\n",
      "Current learning rate: 1.0000000000000002e-06\n",
      "Epoch: 38, Train loss: 0.5389561501344045 \t Val loss: 2.58281720662117\n",
      "Val Acc 0.5913\n",
      "Current learning rate: 1.0000000000000002e-06\n",
      "Epoch: 39, Train loss: 0.5418269115885099 \t Val loss: 2.6245099391937257\n",
      "Val Acc 0.5894\n"
     ]
    }
   ],
   "source": [
    "model = SVHN_Model1()\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "optimizer = torch.optim.Adam(model.parameters(), 0.001)\n",
    "best_loss = 1000.0\n",
    "\n",
    "# Whether to use the GPU\n",
    "use_cuda = False\n",
    "if use_cuda:\n",
    "    model = model.cuda()\n",
    "# Add a learning-rate scheduler to the training loop\n",
    "from torch.optim.lr_scheduler import ReduceLROnPlateau\n",
    "\n",
    "# Cut the LR by 10x when validation loss plateaus for `patience` epochs.\n",
    "scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=5)\n",
    "\n",
    "for epoch in range(40):  \n",
    "    train_loss = train(train_loader, model, criterion, optimizer, epoch)\n",
    "    val_loss = validate(val_loader, model, criterion)\n",
    "    \n",
    "    scheduler.step(val_loss)  # adjust the learning rate based on validation loss\n",
    "    # NOTE(review): ReduceLROnPlateau.get_last_lr() exists only in newer\n",
    "    # PyTorch releases — confirm the installed torch version supports it.\n",
    "    current_lr = scheduler.get_last_lr()\n",
    "    print(f\"Current learning rate: {current_lr[0]}\") \n",
    "    # Ground-truth strings, e.g. [1, 9] -> '19'.\n",
    "    val_label = [''.join(map(str, x)) for x in val_loader.dataset.img_label]\n",
    "    val_predict_label = predict(val_loader, model, 1)\n",
    "    # Split the (n, 55) logit matrix into five 11-way argmaxes -> (n, 5).\n",
    "    val_predict_label = np.vstack([\n",
    "        val_predict_label[:, :11].argmax(1),\n",
    "        val_predict_label[:, 11:22].argmax(1),\n",
    "        val_predict_label[:, 22:33].argmax(1),\n",
    "        val_predict_label[:, 33:44].argmax(1),\n",
    "        val_predict_label[:, 44:55].argmax(1),\n",
    "    ]).T\n",
    "    val_label_pred = []\n",
    "    for x in val_predict_label:\n",
    "        val_label_pred.append(''.join(map(str, x[x!=10])))  # drop padding class 10 before joining\n",
    "    \n",
    "    # Sequence-level accuracy: the whole predicted string must match.\n",
    "    val_char_acc = np.mean(np.array(val_label_pred) == np.array(val_label))\n",
    "\n",
    "    print('Epoch: {0}, Train loss: {1} \\t Val loss: {2}'.format(epoch, train_loss, val_loss))\n",
    "    print('Val Acc', val_char_acc)\n",
    "    # Checkpoint whenever validation loss improves\n",
    "    if val_loss < best_loss:\n",
    "        best_loss = val_loss\n",
    "        torch.save(model.state_dict(), './model.pt')  # save the model state dict\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "id": "adfa16ef-d4c2-4877-8d0d-26d2b9d672ae",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "7233 7233\n"
     ]
    }
   ],
   "source": [
    "test_path = glob.glob('./input/test_a/*.png')\n",
    "test_path.sort()\n",
    "# The test set has no labels; a dummy [1] per image keeps SVHNDataset happy.\n",
    "test_label = [[1]] * len(test_path)\n",
    "print(len(test_path), len(test_label))\n",
    "\n",
    "test_loader = torch.utils.data.DataLoader(\n",
    "    SVHNDataset(test_path, test_label,\n",
    "                transforms.Compose([\n",
    "                    # Match the validation pipeline: the model was trained on\n",
    "                    # 60x120 un-normalized tensors, so the old Resize((70, 140))\n",
    "                    # + Normalize created a train/test distribution mismatch.\n",
    "                    transforms.Resize((60, 120)),\n",
    "                    transforms.ToTensor(),\n",
    "    ])), \n",
    "    batch_size=40, \n",
    "    shuffle=False, \n",
    "    num_workers=0,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "id": "63f288da-e874-4af6-9aa9-9d8dae2fa8a4",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(7233, 55)\n"
     ]
    },
    {
     "ename": "ValueError",
     "evalue": "Length of values (7233) does not match length of index (40000)",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mValueError\u001b[0m                                Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[31], line 22\u001b[0m\n\u001b[0;32m     20\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mpandas\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m \u001b[38;5;21;01mpd\u001b[39;00m\n\u001b[0;32m     21\u001b[0m df_submit \u001b[38;5;241m=\u001b[39m pd\u001b[38;5;241m.\u001b[39mread_csv(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m./input/test_A_sample_submit.csv\u001b[39m\u001b[38;5;124m'\u001b[39m)\n\u001b[1;32m---> 22\u001b[0m df_submit[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mfile_code\u001b[39m\u001b[38;5;124m'\u001b[39m] \u001b[38;5;241m=\u001b[39m test_label_pred\n\u001b[0;32m     23\u001b[0m df_submit\u001b[38;5;241m.\u001b[39mto_csv(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124msubmit.csv\u001b[39m\u001b[38;5;124m'\u001b[39m, index\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m)\n",
      "File \u001b[1;32mD:\\HRAppStoreDownload\\anaconda3\\Lib\\site-packages\\pandas\\core\\frame.py:3950\u001b[0m, in \u001b[0;36mDataFrame.__setitem__\u001b[1;34m(self, key, value)\u001b[0m\n\u001b[0;32m   3947\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_setitem_array([key], value)\n\u001b[0;32m   3948\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m   3949\u001b[0m     \u001b[38;5;66;03m# set column\u001b[39;00m\n\u001b[1;32m-> 3950\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_set_item(key, value)\n",
      "File \u001b[1;32mD:\\HRAppStoreDownload\\anaconda3\\Lib\\site-packages\\pandas\\core\\frame.py:4143\u001b[0m, in \u001b[0;36mDataFrame._set_item\u001b[1;34m(self, key, value)\u001b[0m\n\u001b[0;32m   4133\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_set_item\u001b[39m(\u001b[38;5;28mself\u001b[39m, key, value) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m   4134\u001b[0m \u001b[38;5;250m    \u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[0;32m   4135\u001b[0m \u001b[38;5;124;03m    Add series to DataFrame in specified column.\u001b[39;00m\n\u001b[0;32m   4136\u001b[0m \n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m   4141\u001b[0m \u001b[38;5;124;03m    ensure homogeneity.\u001b[39;00m\n\u001b[0;32m   4142\u001b[0m \u001b[38;5;124;03m    \"\"\"\u001b[39;00m\n\u001b[1;32m-> 4143\u001b[0m     value \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_sanitize_column(value)\n\u001b[0;32m   4145\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m (\n\u001b[0;32m   4146\u001b[0m         key \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcolumns\n\u001b[0;32m   4147\u001b[0m         \u001b[38;5;129;01mand\u001b[39;00m value\u001b[38;5;241m.\u001b[39mndim \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m1\u001b[39m\n\u001b[0;32m   4148\u001b[0m         \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m is_extension_array_dtype(value)\n\u001b[0;32m   4149\u001b[0m     ):\n\u001b[0;32m   4150\u001b[0m         \u001b[38;5;66;03m# broadcast across multiple columns if necessary\u001b[39;00m\n\u001b[0;32m   4151\u001b[0m         \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcolumns\u001b[38;5;241m.\u001b[39mis_unique \u001b[38;5;129;01mor\u001b[39;00m 
\u001b[38;5;28misinstance\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mcolumns, MultiIndex):\n",
      "File \u001b[1;32mD:\\HRAppStoreDownload\\anaconda3\\Lib\\site-packages\\pandas\\core\\frame.py:4870\u001b[0m, in \u001b[0;36mDataFrame._sanitize_column\u001b[1;34m(self, value)\u001b[0m\n\u001b[0;32m   4867\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m _reindex_for_setitem(Series(value), \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mindex)\n\u001b[0;32m   4869\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m is_list_like(value):\n\u001b[1;32m-> 4870\u001b[0m     com\u001b[38;5;241m.\u001b[39mrequire_length_match(value, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mindex)\n\u001b[0;32m   4871\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m sanitize_array(value, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mindex, copy\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m, allow_2d\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m)\n",
      "File \u001b[1;32mD:\\HRAppStoreDownload\\anaconda3\\Lib\\site-packages\\pandas\\core\\common.py:576\u001b[0m, in \u001b[0;36mrequire_length_match\u001b[1;34m(data, index)\u001b[0m\n\u001b[0;32m    572\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[0;32m    573\u001b[0m \u001b[38;5;124;03mCheck the length of data matches the length of the index.\u001b[39;00m\n\u001b[0;32m    574\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[0;32m    575\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(data) \u001b[38;5;241m!=\u001b[39m \u001b[38;5;28mlen\u001b[39m(index):\n\u001b[1;32m--> 576\u001b[0m     \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[0;32m    577\u001b[0m         \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mLength of values \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m    578\u001b[0m         \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m(\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mlen\u001b[39m(data)\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m) \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m    579\u001b[0m         \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdoes not match length of index \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m    580\u001b[0m         \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m(\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mlen\u001b[39m(index)\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m)\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m    581\u001b[0m     )\n",
      "\u001b[1;31mValueError\u001b[0m: Length of values (7233) does not match length of index (40000)"
     ]
    }
   ],
   "source": [
    "# Load the best checkpoint (lowest validation loss).\n",
    "model.load_state_dict(torch.load('model.pt'))\n",
    "\n",
    "test_predict_label = predict(test_loader, model, 1)\n",
    "print(test_predict_label.shape)\n",
    "\n",
    "test_label = [''.join(map(str, x)) for x in test_loader.dataset.img_label]\n",
    "# Five 11-way argmaxes per row -> (n, 5) digit-class matrix.\n",
    "test_predict_label = np.vstack([\n",
    "    test_predict_label[:, :11].argmax(1),\n",
    "    test_predict_label[:, 11:22].argmax(1),\n",
    "    test_predict_label[:, 22:33].argmax(1),\n",
    "    test_predict_label[:, 33:44].argmax(1),\n",
    "    test_predict_label[:, 44:55].argmax(1),\n",
    "]).T\n",
    "\n",
    "test_label_pred = []\n",
    "for x in test_predict_label:\n",
    "    test_label_pred.append(''.join(map(str, x[x!=10])))  # drop padding class 10\n",
    "\n",
    "import pandas as pd\n",
    "# The sample submission has 40000 rows but test_a contains only 7233 images,\n",
    "# so assigning predictions into it raised a length-mismatch ValueError (see\n",
    "# recorded traceback). Build the submission from the sorted file names instead.\n",
    "df_submit = pd.DataFrame({\n",
    "    'file_name': [os.path.basename(p) for p in test_path],\n",
    "    'file_code': test_label_pred,\n",
    "})\n",
    "df_submit.to_csv('submit.csv', index=None)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2850a948-0398-431b-8b22-0b44945256f6",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
