{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "id": "63FADEFD1766436B86F7D567A3123DAC",
    "jupyter": {},
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Looking in indexes: https://pypi.doubanio.com/simple/\n",
      "Collecting pytorch_toolbelt\n",
      "  Downloading https://pypi.doubanio.com/packages/16/89/3d1f5273ecec11d7f01b1374868945daa0641294eddf39a822ee2a005790/pytorch_toolbelt-0.4.0.tar.gz (92 kB)\n",
      "\u001b[K     |████████████████████████████████| 92 kB 772 kB/s eta 0:00:011\n",
      "\u001b[?25hCollecting timm\n",
      "  Downloading https://pypi.doubanio.com/packages/26/42/6a0f5dcb5925532cc40dc5a7f8405bdbee597af36e782ed035b14fcc0946/timm-0.2.1-py3-none-any.whl (225 kB)\n",
      "\u001b[K     |████████████████████████████████| 225 kB 1.8 MB/s eta 0:00:01\n",
      "\u001b[?25hCollecting cnn_finetune\n",
      "  Downloading https://pypi.doubanio.com/packages/e4/63/03a442d31401c43fc17a814f22bd7c39ab8f13f42a6b2467ca0d0d042b3a/cnn_finetune-0.6.0.tar.gz (11 kB)\n",
      "Collecting opencv-python\n",
      "  Downloading https://pypi.doubanio.com/packages/bf/fd/30b6b39beff0e270c2aa65e2903ac8ef4a3bd2a7fe696ef944d70984420f/opencv_python-4.4.0.44-cp38-cp38-manylinux2014_x86_64.whl (49.5 MB)\n",
       "\u001b[K     |████████████████████████████████| 49.5 MB 1.4 kB/s eta 0:00:01\n",
      "\u001b[?25hRequirement already satisfied: torch>=1.4 in /opt/conda/lib/python3.8/site-packages (from pytorch_toolbelt) (1.6.0)\n",
      "Requirement already satisfied: torchvision>=0.5 in /opt/conda/lib/python3.8/site-packages (from pytorch_toolbelt) (0.7.0)\n",
      "Collecting torchnet>=0.0.4\n",
      "  Downloading https://pypi.doubanio.com/packages/b7/b2/d7f70a85d3f6b0365517782632f150e3bbc2fb8e998cd69e27deba599aae/torchnet-0.0.4.tar.gz (23 kB)\n",
      "Collecting pretrainedmodels>=0.7.4\n",
      "  Downloading https://pypi.doubanio.com/packages/84/0e/be6a0e58447ac16c938799d49bfb5fb7a80ac35e137547fc6cee2c08c4cf/pretrainedmodels-0.7.4.tar.gz (58 kB)\n",
      "\u001b[K     |████████████████████████████████| 58 kB 2.5 MB/s eta 0:00:011\n",
      "\u001b[?25hRequirement already satisfied: scipy in /opt/conda/lib/python3.8/site-packages (from cnn_finetune) (1.5.2)\n",
      "Requirement already satisfied: tqdm in /opt/conda/lib/python3.8/site-packages (from cnn_finetune) (4.48.2)\n",
      "Requirement already satisfied: numpy>=1.17.3 in /opt/conda/lib/python3.8/site-packages (from opencv-python) (1.19.1)\n",
      "Collecting future\n",
      "  Downloading https://pypi.doubanio.com/packages/45/0b/38b06fd9b92dc2b68d58b75f900e97884c45bedd2ff83203d933cf5851c9/future-0.18.2.tar.gz (829 kB)\n",
      "\u001b[K     |████████████████████████████████| 829 kB 86.8 MB/s eta 0:00:01\n",
      "\u001b[?25hRequirement already satisfied: pillow>=4.1.1 in /opt/conda/lib/python3.8/site-packages (from torchvision>=0.5->pytorch_toolbelt) (7.2.0)\n",
      "Requirement already satisfied: six in /opt/conda/lib/python3.8/site-packages (from torchnet>=0.0.4->pytorch_toolbelt) (1.15.0)\n",
      "Collecting visdom\n",
      "  Downloading https://pypi.doubanio.com/packages/c9/75/e078f5a2e1df7e0d3044749089fc2823e62d029cc027ed8ae5d71fafcbdc/visdom-0.1.8.9.tar.gz (676 kB)\n",
      "\u001b[K     |████████████████████████████████| 676 kB 88.6 MB/s eta 0:00:01\n",
      "\u001b[?25hCollecting munch\n",
      "  Downloading https://pypi.doubanio.com/packages/cc/ab/85d8da5c9a45e072301beb37ad7f833cd344e04c817d97e0cc75681d248f/munch-2.5.0-py2.py3-none-any.whl (10 kB)\n",
      "Requirement already satisfied: requests in /opt/conda/lib/python3.8/site-packages (from visdom->torchnet>=0.0.4->pytorch_toolbelt) (2.24.0)\n",
      "Requirement already satisfied: tornado in /opt/conda/lib/python3.8/site-packages (from visdom->torchnet>=0.0.4->pytorch_toolbelt) (6.0.4)\n",
      "Requirement already satisfied: pyzmq in /opt/conda/lib/python3.8/site-packages (from visdom->torchnet>=0.0.4->pytorch_toolbelt) (19.0.2)\n",
      "Collecting jsonpatch\n",
      "  Downloading https://pypi.doubanio.com/packages/4f/d0/34b0f59ac08de9c1e07876cfecd80aec650600177b4bd445124c755499a7/jsonpatch-1.26-py2.py3-none-any.whl (11 kB)\n",
      "Collecting torchfile\n",
      "  Downloading https://pypi.doubanio.com/packages/91/af/5b305f86f2d218091af657ddb53f984ecbd9518ca9fe8ef4103a007252c9/torchfile-0.1.0.tar.gz (5.2 kB)\n",
      "Collecting websocket-client\n",
      "  Downloading https://pypi.doubanio.com/packages/4c/5f/f61b420143ed1c8dc69f9eaec5ff1ac36109d52c80de49d66e0c36c3dfdf/websocket_client-0.57.0-py2.py3-none-any.whl (200 kB)\n",
      "\u001b[K     |████████████████████████████████| 200 kB 1.6 MB/s eta 0:00:01\n",
      "\u001b[?25hRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /opt/conda/lib/python3.8/site-packages (from requests->visdom->torchnet>=0.0.4->pytorch_toolbelt) (1.25.10)\n",
      "Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.8/site-packages (from requests->visdom->torchnet>=0.0.4->pytorch_toolbelt) (2020.6.20)\n",
      "Requirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.8/site-packages (from requests->visdom->torchnet>=0.0.4->pytorch_toolbelt) (2.10)\n",
      "Requirement already satisfied: chardet<4,>=3.0.2 in /opt/conda/lib/python3.8/site-packages (from requests->visdom->torchnet>=0.0.4->pytorch_toolbelt) (3.0.4)\n",
      "Collecting jsonpointer>=1.9\n",
      "  Downloading https://pypi.doubanio.com/packages/18/b0/a80d29577c08eea401659254dfaed87f1af45272899e1812d7e01b679bc5/jsonpointer-2.0-py2.py3-none-any.whl (7.6 kB)\n",
      "Building wheels for collected packages: pytorch-toolbelt, cnn-finetune, torchnet, pretrainedmodels, future, visdom, torchfile\n",
      "  Building wheel for pytorch-toolbelt (setup.py) ... \u001b[?25ldone\n",
      "\u001b[?25h  Created wheel for pytorch-toolbelt: filename=pytorch_toolbelt-0.4.0-py3-none-any.whl size=123953 sha256=9c2ac32c0b20d0bf6ce01c0c4e5a6905d0c43f0d32dde6a2afc93591d3e2cc33\n",
      "  Stored in directory: /home/kesci/.cache/pip/wheels/30/eb/95/e7e27b959d3648ce464edab8ce4bd9f7a9c88b8a9831786db7\n",
      "  Building wheel for cnn-finetune (setup.py) ... \u001b[?25ldone\n",
      "\u001b[?25h  Created wheel for cnn-finetune: filename=cnn_finetune-0.6.0-py3-none-any.whl size=11432 sha256=553abd95200608e11f0fb0f217ad523ffdd2ff823c035248fe0d9eb22414ce70\n",
      "  Stored in directory: /home/kesci/.cache/pip/wheels/a0/ef/38/8fe6163ad0fdf0b104d97c038c03fa895fc23773c83dde0736\n",
      "  Building wheel for torchnet (setup.py) ... \u001b[?25ldone\n",
      "\u001b[?25h  Created wheel for torchnet: filename=torchnet-0.0.4-py3-none-any.whl size=29744 sha256=39e3298abc0935e20f52ea1115de7dd50398cb54ad2a3ce6f5300d59ba8fee34\n",
      "  Stored in directory: /home/kesci/.cache/pip/wheels/8c/da/1e/164448e643ab29b2a9a37f28e42d2910b150f50b94b41141a9\n",
      "  Building wheel for pretrainedmodels (setup.py) ... \u001b[?25ldone\n",
      "\u001b[?25h  Created wheel for pretrainedmodels: filename=pretrainedmodels-0.7.4-py3-none-any.whl size=60961 sha256=5316886e0d376ec0902fcc9001cab6a778b18a7ccaf4d640807fc94b933aab18\n",
      "  Stored in directory: /home/kesci/.cache/pip/wheels/d5/1e/37/d862895c9b64a81ac4db731f5f4533f38f485c377d9bfb65cf\n",
      "  Building wheel for future (setup.py) ... \u001b[?25ldone\n",
      "\u001b[?25h  Created wheel for future: filename=future-0.18.2-py3-none-any.whl size=491059 sha256=c402851e01426f3a67e023ce2d2569642874eac2070209c04e0ff8334ffd0a6f\n",
      "  Stored in directory: /home/kesci/.cache/pip/wheels/b2/01/78/599e7c2189a135a35ed97000a75b4a1c51a84292110b6ad85f\n",
      "  Building wheel for visdom (setup.py) ... \u001b[?25ldone\n",
      "\u001b[?25h  Created wheel for visdom: filename=visdom-0.1.8.9-py3-none-any.whl size=655249 sha256=d2e644ce2daebe72d389f4bb60297cfeb4cddc2cf30db09e40e6f272e75466d3\n",
      "  Stored in directory: /home/kesci/.cache/pip/wheels/92/4d/9c/3acf5e93e12be2095edba0a18fc94fff7faec312ed4f860d70\n",
      "  Building wheel for torchfile (setup.py) ... \u001b[?25ldone\n",
      "\u001b[?25h  Created wheel for torchfile: filename=torchfile-0.1.0-py3-none-any.whl size=5711 sha256=f7be64e9974a2b8c27bd63bf6c12d88ec74365bbc27861fd71fc5d2eed1a8bdb\n",
      "  Stored in directory: /home/kesci/.cache/pip/wheels/d5/9c/be/3642d7294a0b45c776add58bfddd783393b22da5c2498f165b\n",
      "Successfully built pytorch-toolbelt cnn-finetune torchnet pretrainedmodels future visdom torchfile\n",
      "Installing collected packages: opencv-python, jsonpointer, jsonpatch, torchfile, websocket-client, visdom, torchnet, pytorch-toolbelt, timm, munch, pretrainedmodels, cnn-finetune, future\n",
      "Successfully installed cnn-finetune-0.6.0 future-0.18.2 jsonpatch-1.26 jsonpointer-2.0 munch-2.5.0 opencv-python-4.4.0.44 pretrainedmodels-0.7.4 pytorch-toolbelt-0.4.0 timm-0.2.1 torchfile-0.1.0 torchnet-0.0.4 visdom-0.1.8.9 websocket-client-0.57.0\n"
     ]
    }
   ],
   "source": [
     "# (Original note: show the cell's run time.) Installs the competition dependencies\n",
     "# from the Douban PyPI mirror (pytorch_toolbelt, timm, cnn_finetune, opencv-python).\n",
     "!pip install -i https://pypi.doubanio.com/simple/  --trusted-host pypi.doubanio.com pytorch_toolbelt timm cnn_finetune opencv-python"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "id": "B64CB8BCE79348EF897210C32AA82BD3",
    "jupyter": {},
    "notebookId": "5f892607bfe3ac0015e354b2",
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "train_size:169020;test_size:10564\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'sunny': 61207, 'cloudy': 43614, 'others': 74763}"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Disabled one-off script (kept for provenance): built train_pseudo.txt / val_pseudo.txt\n",
     "# from the Part1-4 label JSONs with a 1-in-17 validation split.\n",
    "'''\n",
    "import json\n",
    "import os\n",
    "import random\n",
    "#制作训练集和验证集\n",
    "with open('/home/kesci/data/competition/train_set/Part1.json','r') as f:\n",
    "    part1_data=json.load(f)\n",
    "with open('/home/kesci/data/competition/train_set/Part2.json','r') as f:\n",
    "    part2_data=json.load(f)\n",
    "with open('/home/kesci/data/competition/train_set/Part3.json','r') as f:\n",
    "    part3_data=json.load(f)\n",
    "with open('/home/kesci/data/competition/train_set/Part4.json','r') as f:\n",
    "    part4_data=json.load(f)\n",
    "#\n",
    "f_train_val=[]\n",
    "f_val=open('/home/kesci/work/dataset/val_pseudo.txt','w')\n",
    "f_train=open('/home/kesci/work/dataset/train_pseudo.txt','w')\n",
    "class_label={'sunny':'0','cloudy':'1','others':'2'}\n",
    "cnt_classes={'sunny':0,'cloudy':0,'others':0}\n",
    "for img_name in part1_data.keys():\n",
    "    cnt_classes[part1_data[img_name]]+=1\n",
    "    f_train_val.append('train_set/Part1/'+img_name+','+class_label[part1_data[img_name]]+'\\n')\n",
    "    #f_train_val.write('Part1/'+img_name+','+class_label[part1_data[img_name]]+'\\n')\n",
    "for img_name in part2_data.keys():\n",
    "    cnt_classes[part2_data[img_name]]+=1\n",
    "    f_train_val.append('train_set/Part2/'+img_name+','+class_label[part2_data[img_name]]+'\\n')\n",
    "    #f_train_val.write('Part2/'+img_name+','+class_label[part2_data[img_name]]+'\\n')\n",
    "for img_name in part3_data.keys():\n",
    "    cnt_classes[part3_data[img_name]]+=1\n",
    "    f_train_val.append('train_set/Part3/'+img_name+','+class_label[part3_data[img_name]]+'\\n')\n",
    "    #f_train_val.write('Part3/'+img_name+','+class_label[part3_data[img_name]]+'\\n')\n",
    "for img_name in part4_data.keys():\n",
    "    cnt_classes[part4_data[img_name]]+=1\n",
    "    f_train_val.append('train_set/Part4/'+img_name+','+class_label[part4_data[img_name]]+'\\n')\n",
    "    #f_train_val.write('Part4/'+img_name+','+class_label[part4_data[img_name]]+'\\n')\n",
    "#split train/val\n",
    "train_size=0\n",
    "test_size=0\n",
    "for i in range(len(f_train_val)):\n",
    "    if i%17==0:\n",
    "        test_size+=1\n",
    "        f_val.write(f_train_val[i])\n",
    "    else:\n",
    "        f_train.write(f_train_val[i])\n",
    "        train_size+=1\n",
    "f_train.close()\n",
    "f_val.close()\n",
    "print(\"train_size:{};test_size:{}\".format(train_size,test_size))\n",
    "cnt_classes\n",
    "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "id": "000C25CF1BAA4D468E1D058EDB63292D",
    "jupyter": {},
    "notebookId": "5f892607bfe3ac0015e354b2",
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": []
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "221813"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Scratch arithmetic: 169020 train images plus 52793 extra images -- TODO confirm what 52793 counts\n",
     "169020+52793"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "id": "7954444917DD4664A08961FE78406F6C",
    "jupyter": {},
    "notebookId": "5f8aa02cbfe3ac0015eaaffd",
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": []
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([0.34105774, 0.24277611, 0.41616615])"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "import numpy as np\n",
     "# Class frequencies (sunny, cloudy, others) as fractions of the training set.\n",
     "cnt_cls=np.array([61270,43614,74763])\n",
     "cnt_cls/np.sum(cnt_cls)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "id": "9B876016B4DD4B218E0881396B710333",
    "jupyter": {},
    "notebookId": "5f892607bfe3ac0015e354b2",
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([2, 3, 300, 300])\n",
      "(304, 606, 3)\n"
     ]
    }
   ],
   "source": [
     "import os\n",
     "import glob\n",
     "from PIL import Image\n",
     "import torch\n",
     "from torch.utils.data import Dataset,DataLoader\n",
     "import numpy as np\n",
     "from torchvision import transforms as T\n",
     "import torchvision\n",
     "import cv2\n",
     "import pandas as pd\n",
     "import torch.nn as nn\n",
     "from torch.utils.data import DataLoader,Dataset\n",
     "import torch.optim as optim\n",
     "import time\n",
     "from sklearn.metrics import accuracy_score,f1_score,precision_recall_fscore_support\n",
     "from cnn_finetune import make_model\n",
     "from torch.optim.lr_scheduler import CosineAnnealingLR, StepLR\n",
     "from PIL import ImageFile\n",
     "# Tolerate truncated image files: PIL loads what it can instead of raising an error.\n",
     "ImageFile.LOAD_TRUNCATED_IMAGES = True\n",
     "class Config(object):\n",
     "    \"\"\"Central hyper-parameter and path configuration (plain class attributes).\"\"\"\n",
     "    backbone = 'wsl'#'inception_v4'  # model key consumed by the training code\n",
     "    num_classes = 52 # NOTE(review): the weather task above uses 3 classes -- confirm 52 is intended\n",
     "    use_arcLoss = False\n",
     "    metric = 'arc_margin'\n",
     "    easy_margin = False\n",
     "    loss = 'CrossEntropyLoss'#focal_loss/CrossEntropyLoss\n",
     "    feature_dimension=128\n",
     "    # input size and batch sizes\n",
     "    input_size = 300\n",
     "    train_batch_size = 24  # batch size\n",
     "    val_batch_size = 4\n",
     "    test_batch_size = 1\n",
     "    optimizer = 'sgd'\n",
     "    lr_scheduler='cosine'\n",
     "    lr = 1e-4  # adam 0.00001\n",
     "    MOMENTUM = 0.9\n",
     "    device = \"cuda\"  # cuda  or cpu\n",
     "    gpu_id = 0\n",
     "    num_workers = 4  # how many workers for loading data\n",
     "    max_epoch = 30\n",
     "    lr_decay_epoch = 10\n",
     "    lr_decay = 0.95  # when val_loss increase, lr = lr*lr_decay\n",
     "    weight_decay = 5e-4\n",
     "    val_interval = 1\n",
     "    print_interval = 50\n",
     "    save_interval = 1\n",
     "    min_save_epoch=0\n",
     "    # logging and train/val list files\n",
     "    log_dir = 'log/'\n",
     "    train_val_data='/home/kesci/data1/competition/train_set'\n",
     "    train_list='/home/kesci/work/dataset/train.txt'\n",
     "    val_list='/home/kesci/work/dataset/val.txt'\n",
     "    # test images and submission output directory\n",
     "    test_dir='/home/kesci/data1/competition/test_set/'\n",
     "    submit_csv_dir='/home/kesci/work/submit/'\n",
     "    # checkpoint locations (save dir and the specific model used for inference)\n",
     "    checkpoints_dir = '/home/kesci/work/checkpoints/'\n",
     "    test_model_dir= '/home/kesci/work/checkpoints/wsl/wsl_1.pth'\n",
    "#\n",
     "class wheatherDataset(Dataset):\n",
     "    \"\"\"Dataset over a list file whose lines look like '<relative_image_path>,<label>'.\n",
     "\n",
     "    Paths are joined onto `root`; the list is shuffled once at construction time.\n",
     "    \"\"\"\n",
     "    def __init__(self, root, data_list_file, phase='train', input_size=640):\n",
     "        self.phase = phase\n",
     "\n",
     "        with open(os.path.join(data_list_file), 'r') as fd:\n",
     "            imgs = fd.readlines()\n",
     "\n",
     "        # Each line: 'relative/path.jpg,label'; prepend the dataset root.\n",
     "        imgs = [os.path.join(root, img.strip('\\n')) for img in imgs]\n",
     "        # One-time shuffle; np.random.permutation also copies the list into an ndarray.\n",
     "        self.imgs = np.random.permutation(imgs)\n",
     "\n",
     "        # Maps ToTensor's [0, 1] range to [-1, 1].\n",
     "        normalize = T.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n",
     "\n",
     "\n",
     "        if self.phase == 'train':\n",
     "            self.transforms = T.Compose([\n",
     "                T.Resize((input_size,input_size)),\n",
     "                #T.RandomHorizontalFlip(p=0.5),\n",
     "                T.ToTensor(),\n",
     "                normalize\n",
     "            ])\n",
     "        else:\n",
     "            self.transforms = T.Compose([\n",
     "                T.Resize((input_size,input_size)),\n",
     "                T.ToTensor(),\n",
     "                normalize\n",
     "            ])\n",
     "\n",
     "    def __getitem__(self, index):\n",
     "        \"\"\"Return (float image tensor, np.int32 label) for the index-th shuffled entry.\"\"\"\n",
     "        sample = self.imgs[index]\n",
     "        # NOTE(review): a comma inside the image path would break this split -- assumed absent.\n",
     "        splits = sample.split(',')\n",
     "        img_path = splits[0]\n",
     "        data = Image.open(img_path)\n",
     "        data = data.convert('RGB')\n",
     "        data = self.transforms(data)\n",
     "        label = np.int32(splits[1])\n",
     "        return data.float(), label\n",
     "\n",
     "    def __len__(self):\n",
     "        return len(self.imgs)\n",
    "\n",
     "class wheatherDatasetInfer(Dataset):\n",
     "    \"\"\"Inference dataset: yields (transformed image tensor, image path) for every file in data_dir.\"\"\"\n",
     "\n",
     "    def __init__(self,data_dir, input_size=300,transform=None):\n",
     "        # `transform` is accepted but unused; the fixed pipeline below is always applied.\n",
     "        self.image_paths = sorted(glob.glob(data_dir+'/*'))\n",
     "        self.transforms = T.Compose([\n",
     "                T.Resize((input_size,input_size)),\n",
     "                # NOTE(review): a random flip at inference time makes predictions\n",
     "                # non-deterministic -- presumably intentional test-time augmentation; confirm.\n",
     "                T.RandomHorizontalFlip(p=0.5),\n",
     "                T.ToTensor(),\n",
     "                T.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n",
     "            ])\n",
     "\n",
     "    def __getitem__(self, index):\n",
     "        sample_path = self.image_paths[index]\n",
     "        data = Image.open(sample_path)\n",
     "        data = data.convert('RGB')\n",
     "        data = self.transforms(data)\n",
     "        return data.float(),sample_path\n",
     "\n",
     "    def __len__(self):\n",
     "        return len(self.image_paths)\n",
    "\n",
     "if __name__ == '__main__':\n",
     "    # Smoke test: load one validation batch and undo the normalization for visual inspection.\n",
     "    opt=Config()\n",
     "    dataset = wheatherDataset(root=opt.train_val_data,\n",
     "                      data_list_file=opt.val_list,\n",
     "                      phase='train',\n",
     "                      input_size=opt.input_size)\n",
     "\n",
     "    trainloader = DataLoader(dataset, batch_size=2)\n",
     "    for i, (data, label) in enumerate(trainloader):\n",
     "        # imgs, labels = data\n",
     "        # print imgs.numpy().shape\n",
     "        # print data.cpu().numpy()\n",
     "        # if i == 0:\n",
     "        print(data.shape)\n",
     "        img = torchvision.utils.make_grid(data).numpy()\n",
     "        # print img.shape\n",
     "        # print label.shape\n",
     "        # chw -> hwc\n",
     "        img = np.transpose(img, (1, 2, 0))\n",
     "        #cv2.imshow('img', img)\n",
     "        # Invert Normalize(mean=0.5, std=0.5) and rescale to 0..255: x*(0.5*255) + 0.5*255.\n",
     "        img *= np.array([0.5, 0.5, 0.5])*255\n",
     "        img += np.array([0.5, 0.5, 0.5])*255\n",
     "        #img += np.array([1, 1, 1])\n",
     "        #img *= 127.5\n",
     "        img = img.astype(np.uint8)\n",
     "        # RGB -> BGR channel order (for display via cv2, which is commented out below).\n",
     "        img = img[:, :, [2, 1, 0]]\n",
     "\n",
     "        print(img.shape)\n",
     "        #cv2.imshow('img', img)\n",
     "        #cv2.waitKey(10000)\n",
     "        break\n",
     "        # dst.decode_segmap(labels.numpy()[0], plot=True)\n",
     "#\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "\n",
    "\n",
     "class LabelSmoothSoftmaxCE(nn.Module):\n",
     "    \"\"\"Softmax cross-entropy with label smoothing and an ignore label.\n",
     "\n",
     "    The target distribution puts `lb_pos` on the true class and `lb_neg` on every\n",
     "    other class; positions whose label equals `lb_ignore` contribute zero loss.\n",
     "    \"\"\"\n",
     "    def __init__(self,\n",
     "                 lb_pos=0.9,\n",
     "                 lb_neg=0.005,\n",
     "                 reduction='mean',\n",
     "                 lb_ignore=255,\n",
     "                 ):\n",
     "        super(LabelSmoothSoftmaxCE, self).__init__()\n",
     "        self.lb_pos = lb_pos\n",
     "        self.lb_neg = lb_neg\n",
     "        self.reduction = reduction\n",
     "        self.lb_ignore = lb_ignore\n",
     "        # Log-softmax over dim 1 (the class dimension of the logits).\n",
     "        self.log_softmax = nn.LogSoftmax(1)\n",
     "\n",
     "    def forward(self, logits, label):\n",
     "        \"\"\"logits: raw scores with classes on dim 1; label: integer class ids.\"\"\"\n",
     "        logs = self.log_softmax(logits)\n",
     "        # Mask of positions to ignore; n_valid counts the rest for the mean reduction.\n",
     "        ignore = label.data.cpu() == self.lb_ignore\n",
     "        n_valid = (ignore == 0).sum()\n",
     "        # Temporarily map ignored labels to class 0 so scatter_ receives valid indices.\n",
     "        label = label.clone()\n",
     "        label[ignore] = 0\n",
     "        lb_one_hot = logits.data.clone().zero_().scatter_(1, label.unsqueeze(1), 1)\n",
     "        # Smoothed target: lb_pos on the true class, lb_neg everywhere else.\n",
     "        label = self.lb_pos * lb_one_hot + self.lb_neg * (1-lb_one_hot)\n",
     "        # Zero the entire target distribution of every ignored position: build advanced\n",
     "        # indices from the nonzero coordinates, with arange spanning the class dim.\n",
     "        ignore = ignore.nonzero()\n",
     "        _, M = ignore.size()\n",
     "        a, *b = ignore.chunk(M, dim=1)\n",
     "        label[[a, torch.arange(label.size(1)), *b]] = 0\n",
     "\n",
     "        if self.reduction == 'mean':\n",
     "            # NOTE(review): n_valid == 0 (all positions ignored) would divide by zero.\n",
     "            loss = -torch.sum(torch.sum(logs*label, dim=1)) / n_valid\n",
     "        elif self.reduction == 'none':\n",
     "            loss = -torch.sum(logs*label, dim=1)\n",
     "        return loss\n",
    "import logging\n",
    "\n",
    "def get_logger(filename, verbosity=1, name=None):\n",
    "    level_dict = {0: logging.DEBUG, 1: logging.INFO, 2: logging.WARNING}\n",
    "    formatter = logging.Formatter(\n",
    "        \"[%(asctime)s][%(filename)s][%(levelname)s] %(message)s\"\n",
    "    )\n",
    "    logger = logging.getLogger(name)\n",
    "    logger.setLevel(level_dict[verbosity])\n",
    "\n",
    "    fh = logging.FileHandler(filename, \"w\")\n",
    "    fh.setFormatter(formatter)\n",
    "    logger.addHandler(fh)\n",
    "\n",
    "    sh = logging.StreamHandler()\n",
    "    sh.setFormatter(formatter)\n",
    "    logger.addHandler(sh)\n",
    "    return logger\n",
    "#"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "id": "FD2417C375A5447783AEF4C7FF566E24",
    "jupyter": {},
    "notebookId": "5f892607bfe3ac0015e354b2",
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
     "# The automatic download of the xception weights was too slow, so they were downloaded\n",
     "# locally, uploaded the same way as mounted data, and copied into torch's checkpoint\n",
     "# cache directory. The same procedure applies to the other models.\n",
     "#!cp /home/kesci/input/pre_train5544/xception-43020ad28.pth /home/kesci/.cache/torch/hub/checkpoints/"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "id": "C0631D026EE4480BA16028E468369DFE",
    "jupyter": {},
    "notebookId": "5f8aa02cbfe3ac0015eaaffd",
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
     "# Copy locally-uploaded semi-weakly-supervised ResNeXt weights into torch's checkpoint cache\n",
     "# (manual workaround for slow automatic downloads).\n",
     "#!cp /home/kesci/input/swsr5567/semi_weakly_supervised_resnext101_32x8-b4712904.pth /home/kesci/.cache/torch/hub/checkpoints"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "1C4D1F8DB94A4A2E840DBE37694A8210",
    "jupyter": {},
    "mdEditEnable": false,
    "notebookId": "5f8aa02cbfe3ac0015eaaffd",
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": []
   },
   "source": [
    "## 实验记录\n",
    "- 300输入尺度，5e-4学习率，水平翻转:一个epoch:0.829,线下0.808\n",
    "- 0.829模型用作预训练，2e-4学习率，再训一个epoch，线上0.842，线下0.818\n",
    "- 加上标签平滑(双loss)，将8424的作为预训练再训练一个epoch,线下0.819(812~819波动)\n",
    "    - 虽然都为线下818,但是（三类）侧重点却不一样，分别交的结果都有842，所以训练三个模型进行一个投票操作.线上0.845"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "id": "C44B4E02B88641C981D31C3C102698BE",
    "jupyter": {},
    "notebookId": "5f8aa02cbfe3ac0015eaaffd",
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
     "# Apparently left over from an experiment with pytorch_toolbelt losses; kept disabled\n",
     "# (the package itself is already installed by the first cell).\n",
     "#from pytorch_toolbelt import losses as L\n",
     "#!pip install pytorch_toolbelt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "id": "DAA36778C92E48CE909060DD950C1694",
    "jupyter": {},
    "notebookId": "5f892607bfe3ac0015e354b2",
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[2020-10-22 13:42:42,949][<ipython-input-6-4a7b177564e6>][INFO] Using: wsl\n",
      "[2020-10-22 13:42:42,950][<ipython-input-6-4a7b177564e6>][INFO] InputSize: 300\n",
      "[2020-10-22 13:42:42,951][<ipython-input-6-4a7b177564e6>][INFO] optimizer: sgd\n",
      "[2020-10-22 13:42:42,951][<ipython-input-6-4a7b177564e6>][INFO] lr_init: 0.0001\n",
      "[2020-10-22 13:42:42,952][<ipython-input-6-4a7b177564e6>][INFO] criterion: CrossEntropyLoss\n",
      "[2020-10-22 13:42:42,953][<ipython-input-6-4a7b177564e6>][INFO] Using the GPU: 0\n",
      "[2020-10-22 13:42:51,732][<ipython-input-6-4a7b177564e6>][INFO] total_iters:7043\n",
      "[2020-10-22 13:42:51,737][<ipython-input-6-4a7b177564e6>][INFO] start training...\n",
      "[2020-10-22 13:42:51,738][<ipython-input-6-4a7b177564e6>][INFO] learning rate:0.0001\n",
      "[2020-10-22 13:42:51,738][<ipython-input-6-4a7b177564e6>][INFO] Epoch 1/30\n",
      "[2020-10-22 13:42:51,738][<ipython-input-6-4a7b177564e6>][INFO] ----------\n",
      "[2020-10-22 13:42:51,739][<ipython-input-6-4a7b177564e6>][INFO] Learning Rate is 0.00010\n",
      "[2020-10-22 13:42:54,445][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(1/7043) loss:0.310 lr:0.0001000 epoch_Time:296.0min:\n",
      "[2020-10-22 13:44:19,491][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(51/7043) loss:0.315 lr:0.0001000 epoch_Time:200.0min:\n",
      "[2020-10-22 13:45:45,606][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(101/7043) loss:0.336 lr:0.0001000 epoch_Time:200.0min:\n",
      "[2020-10-22 13:47:11,773][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(151/7043) loss:0.488 lr:0.0001000 epoch_Time:198.0min:\n",
      "[2020-10-22 13:48:37,919][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(201/7043) loss:0.675 lr:0.0001000 epoch_Time:197.0min:\n",
      "[2020-10-22 13:50:04,085][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(251/7043) loss:0.397 lr:0.0001000 epoch_Time:195.0min:\n",
      "[2020-10-22 13:51:30,265][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(301/7043) loss:0.779 lr:0.0001000 epoch_Time:194.0min:\n",
      "[2020-10-22 13:52:56,441][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(351/7043) loss:0.429 lr:0.0001000 epoch_Time:192.0min:\n",
      "[2020-10-22 13:54:22,610][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(401/7043) loss:0.294 lr:0.0001000 epoch_Time:191.0min:\n",
      "[2020-10-22 13:55:48,792][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(451/7043) loss:0.223 lr:0.0001000 epoch_Time:190.0min:\n",
      "[2020-10-22 13:57:14,957][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(501/7043) loss:0.428 lr:0.0001000 epoch_Time:188.0min:\n",
      "[2020-10-22 13:58:41,112][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(551/7043) loss:0.249 lr:0.0001000 epoch_Time:187.0min:\n",
      "[2020-10-22 14:00:07,269][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(601/7043) loss:0.625 lr:0.0001000 epoch_Time:185.0min:\n",
      "[2020-10-22 14:01:33,469][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(651/7043) loss:0.293 lr:0.0001000 epoch_Time:184.0min:\n",
      "[2020-10-22 14:02:59,648][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(701/7043) loss:0.265 lr:0.0001000 epoch_Time:182.0min:\n",
      "[2020-10-22 14:04:25,822][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(751/7043) loss:0.506 lr:0.0001000 epoch_Time:181.0min:\n",
      "[2020-10-22 14:05:51,986][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(801/7043) loss:0.290 lr:0.0001000 epoch_Time:179.0min:\n",
      "[2020-10-22 14:07:18,168][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(851/7043) loss:0.262 lr:0.0001000 epoch_Time:178.0min:\n",
      "[2020-10-22 14:08:44,312][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(901/7043) loss:0.344 lr:0.0001000 epoch_Time:177.0min:\n",
      "[2020-10-22 14:10:10,473][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(951/7043) loss:0.400 lr:0.0001000 epoch_Time:175.0min:\n",
      "[2020-10-22 14:11:36,646][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(1001/7043) loss:0.633 lr:0.0001000 epoch_Time:174.0min:\n",
      "[2020-10-22 14:13:02,824][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(1051/7043) loss:0.314 lr:0.0001000 epoch_Time:172.0min:\n",
      "[2020-10-22 14:14:28,980][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(1101/7043) loss:0.425 lr:0.0001000 epoch_Time:171.0min:\n",
      "[2020-10-22 14:15:55,127][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(1151/7043) loss:0.481 lr:0.0001000 epoch_Time:169.0min:\n",
      "[2020-10-22 14:17:21,314][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(1201/7043) loss:0.244 lr:0.0001000 epoch_Time:168.0min:\n",
      "[2020-10-22 14:18:47,465][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(1251/7043) loss:0.202 lr:0.0001000 epoch_Time:167.0min:\n",
      "[2020-10-22 14:20:13,623][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(1301/7043) loss:0.477 lr:0.0001000 epoch_Time:165.0min:\n",
      "[2020-10-22 14:21:39,784][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(1351/7043) loss:0.259 lr:0.0001000 epoch_Time:164.0min:\n",
      "[2020-10-22 14:23:05,984][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(1401/7043) loss:0.301 lr:0.0001000 epoch_Time:162.0min:\n",
      "[2020-10-22 14:24:32,165][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(1451/7043) loss:0.197 lr:0.0001000 epoch_Time:161.0min:\n",
      "[2020-10-22 14:25:58,340][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(1501/7043) loss:0.378 lr:0.0001000 epoch_Time:159.0min:\n",
      "[2020-10-22 14:27:24,507][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(1551/7043) loss:0.398 lr:0.0001000 epoch_Time:158.0min:\n",
      "[2020-10-22 14:28:50,681][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(1601/7043) loss:0.356 lr:0.0001000 epoch_Time:157.0min:\n",
      "[2020-10-22 14:30:16,843][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(1651/7043) loss:0.426 lr:0.0001000 epoch_Time:155.0min:\n",
      "[2020-10-22 14:31:43,030][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(1701/7043) loss:0.317 lr:0.0001000 epoch_Time:154.0min:\n",
      "[2020-10-22 14:33:09,218][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(1751/7043) loss:0.390 lr:0.0001000 epoch_Time:152.0min:\n",
      "[2020-10-22 14:34:35,401][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(1801/7043) loss:0.282 lr:0.0001000 epoch_Time:151.0min:\n",
      "[2020-10-22 14:36:01,564][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(1851/7043) loss:0.628 lr:0.0001000 epoch_Time:149.0min:\n",
      "[2020-10-22 14:37:27,735][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(1901/7043) loss:0.546 lr:0.0001000 epoch_Time:148.0min:\n",
      "[2020-10-22 14:38:53,930][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(1951/7043) loss:0.653 lr:0.0001000 epoch_Time:146.0min:\n",
      "[2020-10-22 14:40:20,084][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(2001/7043) loss:0.329 lr:0.0001000 epoch_Time:145.0min:\n",
      "[2020-10-22 14:41:46,245][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(2051/7043) loss:0.270 lr:0.0001000 epoch_Time:144.0min:\n",
      "[2020-10-22 14:43:12,429][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(2101/7043) loss:0.340 lr:0.0001000 epoch_Time:142.0min:\n",
      "[2020-10-22 14:44:38,585][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(2151/7043) loss:0.363 lr:0.0001000 epoch_Time:141.0min:\n",
      "[2020-10-22 14:46:04,753][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(2201/7043) loss:0.233 lr:0.0001000 epoch_Time:139.0min:\n",
      "[2020-10-22 14:47:30,955][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(2251/7043) loss:0.163 lr:0.0001000 epoch_Time:138.0min:\n",
      "[2020-10-22 14:48:57,127][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(2301/7043) loss:0.199 lr:0.0001000 epoch_Time:136.0min:\n",
      "[2020-10-22 14:50:23,305][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(2351/7043) loss:0.638 lr:0.0001000 epoch_Time:135.0min:\n",
      "[2020-10-22 14:51:49,467][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(2401/7043) loss:0.836 lr:0.0001000 epoch_Time:134.0min:\n",
      "[2020-10-22 14:53:15,625][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(2451/7043) loss:0.410 lr:0.0001000 epoch_Time:132.0min:\n",
      "[2020-10-22 14:54:41,807][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(2501/7043) loss:0.470 lr:0.0001000 epoch_Time:131.0min:\n",
      "[2020-10-22 14:56:07,969][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(2551/7043) loss:0.379 lr:0.0001000 epoch_Time:129.0min:\n",
      "[2020-10-22 14:57:34,143][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(2601/7043) loss:0.495 lr:0.0001000 epoch_Time:128.0min:\n",
      "[2020-10-22 14:59:00,351][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(2651/7043) loss:0.244 lr:0.0001000 epoch_Time:126.0min:\n",
      "[2020-10-22 15:00:26,508][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(2701/7043) loss:0.190 lr:0.0001000 epoch_Time:125.0min:\n",
      "[2020-10-22 15:01:52,664][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(2751/7043) loss:0.741 lr:0.0001000 epoch_Time:123.0min:\n",
      "[2020-10-22 15:03:18,823][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(2801/7043) loss:0.381 lr:0.0001000 epoch_Time:122.0min:\n",
      "[2020-10-22 15:04:45,021][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(2851/7043) loss:0.674 lr:0.0001000 epoch_Time:121.0min:\n",
      "[2020-10-22 15:06:11,189][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(2901/7043) loss:0.209 lr:0.0001000 epoch_Time:119.0min:\n",
      "[2020-10-22 15:07:37,335][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(2951/7043) loss:0.240 lr:0.0001000 epoch_Time:118.0min:\n",
      "[2020-10-22 15:09:03,527][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(3001/7043) loss:0.546 lr:0.0001000 epoch_Time:116.0min:\n",
      "[2020-10-22 15:10:29,693][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(3051/7043) loss:0.300 lr:0.0001000 epoch_Time:115.0min:\n",
      "[2020-10-22 15:11:55,842][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(3101/7043) loss:0.379 lr:0.0001000 epoch_Time:113.0min:\n",
      "[2020-10-22 15:13:21,997][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(3151/7043) loss:0.299 lr:0.0001000 epoch_Time:112.0min:\n",
      "[2020-10-22 15:14:48,178][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(3201/7043) loss:0.182 lr:0.0001000 epoch_Time:111.0min:\n",
      "[2020-10-22 15:16:14,372][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(3251/7043) loss:0.601 lr:0.0001000 epoch_Time:109.0min:\n",
      "[2020-10-22 15:17:40,539][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(3301/7043) loss:0.257 lr:0.0001000 epoch_Time:108.0min:\n",
      "[2020-10-22 15:19:06,724][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(3351/7043) loss:0.231 lr:0.0001000 epoch_Time:106.0min:\n",
      "[2020-10-22 15:20:32,918][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(3401/7043) loss:0.191 lr:0.0001000 epoch_Time:105.0min:\n",
      "[2020-10-22 15:21:59,084][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(3451/7043) loss:0.263 lr:0.0001000 epoch_Time:103.0min:\n",
      "[2020-10-22 15:23:25,247][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(3501/7043) loss:0.431 lr:0.0001000 epoch_Time:102.0min:\n",
      "[2020-10-22 15:24:51,443][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(3551/7043) loss:0.308 lr:0.0001000 epoch_Time:101.0min:\n",
      "[2020-10-22 15:26:17,607][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(3601/7043) loss:0.412 lr:0.0001000 epoch_Time:99.0min:\n",
      "[2020-10-22 15:27:43,765][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(3651/7043) loss:0.585 lr:0.0001000 epoch_Time:98.0min:\n",
      "[2020-10-22 15:29:09,917][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(3701/7043) loss:0.436 lr:0.0001000 epoch_Time:96.0min:\n",
      "[2020-10-22 15:30:36,112][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(3751/7043) loss:0.341 lr:0.0001000 epoch_Time:95.0min:\n",
      "[2020-10-22 15:32:02,279][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(3801/7043) loss:0.299 lr:0.0001000 epoch_Time:93.0min:\n",
      "[2020-10-22 15:33:28,446][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(3851/7043) loss:0.319 lr:0.0001000 epoch_Time:92.0min:\n",
      "[2020-10-22 15:34:54,652][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(3901/7043) loss:0.317 lr:0.0001000 epoch_Time:90.0min:\n",
      "[2020-10-22 15:36:20,846][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(3951/7043) loss:0.273 lr:0.0001000 epoch_Time:89.0min:\n",
      "[2020-10-22 15:37:47,008][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(4001/7043) loss:0.333 lr:0.0001000 epoch_Time:88.0min:\n",
      "[2020-10-22 15:39:13,158][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(4051/7043) loss:0.270 lr:0.0001000 epoch_Time:86.0min:\n",
      "[2020-10-22 15:40:39,341][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(4101/7043) loss:0.424 lr:0.0001000 epoch_Time:85.0min:\n",
      "[2020-10-22 15:42:05,532][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(4151/7043) loss:0.507 lr:0.0001000 epoch_Time:83.0min:\n",
      "[2020-10-22 15:43:31,688][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(4201/7043) loss:0.159 lr:0.0001000 epoch_Time:82.0min:\n",
      "[2020-10-22 15:44:57,861][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(4251/7043) loss:0.515 lr:0.0001000 epoch_Time:80.0min:\n",
      "[2020-10-22 15:46:24,088][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(4301/7043) loss:0.427 lr:0.0001000 epoch_Time:79.0min:\n",
      "[2020-10-22 15:47:50,260][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(4351/7043) loss:0.603 lr:0.0001000 epoch_Time:78.0min:\n",
      "[2020-10-22 15:49:16,421][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(4401/7043) loss:0.164 lr:0.0001000 epoch_Time:76.0min:\n",
      "[2020-10-22 15:50:42,588][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(4451/7043) loss:0.408 lr:0.0001000 epoch_Time:75.0min:\n",
      "[2020-10-22 15:52:08,758][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(4501/7043) loss:0.250 lr:0.0001000 epoch_Time:73.0min:\n",
      "[2020-10-22 15:53:34,931][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(4551/7043) loss:0.476 lr:0.0001000 epoch_Time:72.0min:\n",
      "[2020-10-22 15:55:01,119][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(4601/7043) loss:0.498 lr:0.0001000 epoch_Time:70.0min:\n",
      "[2020-10-22 15:56:27,321][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(4651/7043) loss:0.304 lr:0.0001000 epoch_Time:69.0min:\n",
      "[2020-10-22 15:57:53,478][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(4701/7043) loss:0.374 lr:0.0001000 epoch_Time:67.0min:\n",
      "[2020-10-22 15:59:19,630][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(4751/7043) loss:0.305 lr:0.0001000 epoch_Time:66.0min:\n",
      "[2020-10-22 16:00:45,795][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(4801/7043) loss:0.250 lr:0.0001000 epoch_Time:65.0min:\n",
      "[2020-10-22 16:02:11,974][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(4851/7043) loss:0.405 lr:0.0001000 epoch_Time:63.0min:\n",
      "[2020-10-22 16:03:38,125][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(4901/7043) loss:0.527 lr:0.0001000 epoch_Time:62.0min:\n",
      "[2020-10-22 16:05:04,296][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(4951/7043) loss:0.446 lr:0.0001000 epoch_Time:60.0min:\n",
      "[2020-10-22 16:06:30,473][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(5001/7043) loss:0.223 lr:0.0001000 epoch_Time:59.0min:\n",
      "[2020-10-22 16:07:56,649][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(5051/7043) loss:0.395 lr:0.0001000 epoch_Time:57.0min:\n",
      "[2020-10-22 16:09:22,824][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(5101/7043) loss:0.611 lr:0.0001000 epoch_Time:56.0min:\n",
      "[2020-10-22 16:10:48,999][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(5151/7043) loss:0.230 lr:0.0001000 epoch_Time:55.0min:\n",
      "[2020-10-22 16:12:15,175][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(5201/7043) loss:0.590 lr:0.0001000 epoch_Time:53.0min:\n",
      "[2020-10-22 16:13:41,317][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(5251/7043) loss:0.601 lr:0.0001000 epoch_Time:52.0min:\n",
      "[2020-10-22 16:15:07,475][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(5301/7043) loss:0.356 lr:0.0001000 epoch_Time:50.0min:\n",
      "[2020-10-22 16:16:33,658][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(5351/7043) loss:0.305 lr:0.0001000 epoch_Time:49.0min:\n",
      "[2020-10-22 16:17:59,842][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(5401/7043) loss:0.244 lr:0.0001000 epoch_Time:47.0min:\n",
      "[2020-10-22 16:19:26,014][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(5451/7043) loss:0.238 lr:0.0001000 epoch_Time:46.0min:\n",
      "[2020-10-22 16:20:52,184][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(5501/7043) loss:0.289 lr:0.0001000 epoch_Time:44.0min:\n",
      "[2020-10-22 16:22:18,366][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(5551/7043) loss:0.371 lr:0.0001000 epoch_Time:43.0min:\n",
      "[2020-10-22 16:23:44,537][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(5601/7043) loss:0.412 lr:0.0001000 epoch_Time:42.0min:\n",
      "[2020-10-22 16:25:10,687][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(5651/7043) loss:0.415 lr:0.0001000 epoch_Time:40.0min:\n",
      "[2020-10-22 16:26:36,852][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(5701/7043) loss:0.218 lr:0.0001000 epoch_Time:39.0min:\n",
      "[2020-10-22 16:28:03,049][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(5751/7043) loss:0.352 lr:0.0001000 epoch_Time:37.0min:\n",
      "[2020-10-22 16:29:29,207][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(5801/7043) loss:0.400 lr:0.0001000 epoch_Time:36.0min:\n",
      "[2020-10-22 16:30:55,359][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(5851/7043) loss:0.724 lr:0.0001000 epoch_Time:34.0min:\n",
      "[2020-10-22 16:32:21,521][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(5901/7043) loss:0.311 lr:0.0001000 epoch_Time:33.0min:\n",
      "[2020-10-22 16:33:47,693][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(5951/7043) loss:0.228 lr:0.0001000 epoch_Time:32.0min:\n",
      "[2020-10-22 16:35:13,850][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(6001/7043) loss:0.591 lr:0.0001000 epoch_Time:30.0min:\n",
      "[2020-10-22 16:36:40,004][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(6051/7043) loss:0.428 lr:0.0001000 epoch_Time:29.0min:\n",
      "[2020-10-22 16:38:06,194][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(6101/7043) loss:0.324 lr:0.0001000 epoch_Time:27.0min:\n",
      "[2020-10-22 16:39:32,393][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(6151/7043) loss:0.364 lr:0.0001000 epoch_Time:26.0min:\n",
      "[2020-10-22 16:40:58,553][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(6201/7043) loss:0.402 lr:0.0001000 epoch_Time:24.0min:\n",
      "[2020-10-22 16:42:24,714][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(6251/7043) loss:0.507 lr:0.0001000 epoch_Time:23.0min:\n",
      "[2020-10-22 16:43:50,883][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(6301/7043) loss:0.329 lr:0.0001000 epoch_Time:22.0min:\n",
      "[2020-10-22 16:45:17,046][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(6351/7043) loss:0.606 lr:0.0001000 epoch_Time:20.0min:\n",
      "[2020-10-22 16:46:43,202][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(6401/7043) loss:0.428 lr:0.0001000 epoch_Time:19.0min:\n",
      "[2020-10-22 16:48:09,371][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(6451/7043) loss:0.308 lr:0.0001000 epoch_Time:17.0min:\n",
      "[2020-10-22 16:49:35,554][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(6501/7043) loss:0.443 lr:0.0001000 epoch_Time:16.0min:\n",
      "[2020-10-22 16:51:01,720][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(6551/7043) loss:0.405 lr:0.0001000 epoch_Time:14.0min:\n",
      "[2020-10-22 16:52:27,868][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(6601/7043) loss:0.421 lr:0.0001000 epoch_Time:13.0min:\n",
      "[2020-10-22 16:53:54,035][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(6651/7043) loss:0.330 lr:0.0001000 epoch_Time:11.0min:\n",
      "[2020-10-22 16:55:20,219][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(6701/7043) loss:0.528 lr:0.0001000 epoch_Time:10.0min:\n",
      "[2020-10-22 16:56:46,393][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(6751/7043) loss:0.389 lr:0.0001000 epoch_Time:9.0min:\n",
      "[2020-10-22 16:58:12,548][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(6801/7043) loss:0.662 lr:0.0001000 epoch_Time:7.0min:\n",
      "[2020-10-22 16:59:38,731][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(6851/7043) loss:0.329 lr:0.0001000 epoch_Time:6.0min:\n",
      "[2020-10-22 17:01:04,894][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(6901/7043) loss:0.506 lr:0.0001000 epoch_Time:4.0min:\n",
      "[2020-10-22 17:02:31,068][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(6951/7043) loss:0.400 lr:0.0001000 epoch_Time:3.0min:\n",
      "[2020-10-22 17:03:57,251][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(7001/7043) loss:0.597 lr:0.0001000 epoch_Time:1.0min:\n",
      "[2020-10-22 17:05:08,775][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:1(7043/7043) loss:1.321 lr:0.0001000 epoch_Time:0.0min:\n",
      "[2020-10-22 17:09:25,528][<ipython-input-6-4a7b177564e6>][INFO] 各类单独F1:{'sunny': 0.8419291887371062, 'cloudy': 0.7553150367220719, 'others': 0.8649202733485194}  各类F1取平均:0.8207214996025658\n",
      "[2020-10-22 17:09:25,530][<ipython-input-6-4a7b177564e6>][INFO] val_size: 10564  valLoss: 0.3916 valAcc: 0.8303\n",
      "[2020-10-22 17:09:25,538][<ipython-input-6-4a7b177564e6>][INFO] Epoch:[1/30] train_acc=0.839 \n",
      "[2020-10-22 17:09:33,355][<ipython-input-6-4a7b177564e6>][INFO] learning rate:0.0001\n",
      "[2020-10-22 17:09:33,649][<ipython-input-6-4a7b177564e6>][INFO] Epoch 2/30\n",
      "[2020-10-22 17:09:33,649][<ipython-input-6-4a7b177564e6>][INFO] ----------\n",
      "[2020-10-22 17:09:33,650][<ipython-input-6-4a7b177564e6>][INFO] Learning Rate is 0.00005\n",
      "[2020-10-22 17:09:35,999][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(1/7043) loss:0.418 lr:0.0000500 epoch_Time:308.0min:\n",
      "[2020-10-22 17:10:58,573][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(51/7043) loss:0.303 lr:0.0000500 epoch_Time:195.0min:\n",
      "[2020-10-22 17:12:21,414][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(101/7043) loss:0.243 lr:0.0000500 epoch_Time:193.0min:\n",
      "[2020-10-22 17:13:44,354][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(151/7043) loss:0.246 lr:0.0000500 epoch_Time:191.0min:\n",
      "[2020-10-22 17:15:07,325][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(201/7043) loss:0.327 lr:0.0000500 epoch_Time:190.0min:\n",
      "[2020-10-22 17:16:30,284][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(251/7043) loss:0.343 lr:0.0000500 epoch_Time:188.0min:\n",
      "[2020-10-22 17:17:53,185][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(301/7043) loss:0.317 lr:0.0000500 epoch_Time:186.0min:\n",
      "[2020-10-22 17:19:16,108][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(351/7043) loss:0.322 lr:0.0000500 epoch_Time:185.0min:\n",
      "[2020-10-22 17:20:39,048][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(401/7043) loss:0.310 lr:0.0000500 epoch_Time:183.0min:\n",
      "[2020-10-22 17:22:02,001][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(451/7043) loss:0.319 lr:0.0000500 epoch_Time:182.0min:\n",
      "[2020-10-22 17:23:24,943][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(501/7043) loss:0.327 lr:0.0000500 epoch_Time:181.0min:\n",
      "[2020-10-22 17:24:47,894][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(551/7043) loss:0.294 lr:0.0000500 epoch_Time:179.0min:\n",
      "[2020-10-22 17:26:10,837][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(601/7043) loss:0.385 lr:0.0000500 epoch_Time:178.0min:\n",
      "[2020-10-22 17:27:33,769][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(651/7043) loss:0.291 lr:0.0000500 epoch_Time:176.0min:\n",
      "[2020-10-22 17:28:56,712][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(701/7043) loss:0.348 lr:0.0000500 epoch_Time:175.0min:\n",
      "[2020-10-22 17:30:19,639][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(751/7043) loss:0.449 lr:0.0000500 epoch_Time:174.0min:\n",
      "[2020-10-22 17:31:42,546][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(801/7043) loss:0.465 lr:0.0000500 epoch_Time:172.0min:\n",
      "[2020-10-22 17:33:05,478][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(851/7043) loss:0.266 lr:0.0000500 epoch_Time:171.0min:\n",
      "[2020-10-22 17:34:28,398][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(901/7043) loss:0.253 lr:0.0000500 epoch_Time:170.0min:\n",
      "[2020-10-22 17:35:51,315][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(951/7043) loss:0.226 lr:0.0000500 epoch_Time:168.0min:\n",
      "[2020-10-22 17:37:14,247][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(1001/7043) loss:0.392 lr:0.0000500 epoch_Time:167.0min:\n",
      "[2020-10-22 17:38:37,290][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(1051/7043) loss:0.410 lr:0.0000500 epoch_Time:165.0min:\n",
      "[2020-10-22 17:40:00,229][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(1101/7043) loss:0.263 lr:0.0000500 epoch_Time:164.0min:\n",
      "[2020-10-22 17:41:23,181][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(1151/7043) loss:0.428 lr:0.0000500 epoch_Time:163.0min:\n",
      "[2020-10-22 17:42:46,164][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(1201/7043) loss:0.202 lr:0.0000500 epoch_Time:161.0min:\n",
      "[2020-10-22 17:44:09,115][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(1251/7043) loss:0.457 lr:0.0000500 epoch_Time:160.0min:\n",
      "[2020-10-22 17:45:32,015][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(1301/7043) loss:0.590 lr:0.0000500 epoch_Time:159.0min:\n",
      "[2020-10-22 17:46:54,923][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(1351/7043) loss:0.379 lr:0.0000500 epoch_Time:157.0min:\n",
      "[2020-10-22 17:48:17,891][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(1401/7043) loss:0.309 lr:0.0000500 epoch_Time:156.0min:\n",
      "[2020-10-22 17:49:40,844][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(1451/7043) loss:0.511 lr:0.0000500 epoch_Time:154.0min:\n",
      "[2020-10-22 17:51:03,790][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(1501/7043) loss:0.291 lr:0.0000500 epoch_Time:153.0min:\n",
      "[2020-10-22 17:52:26,713][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(1551/7043) loss:0.115 lr:0.0000500 epoch_Time:152.0min:\n",
      "[2020-10-22 17:53:49,649][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(1601/7043) loss:0.618 lr:0.0000500 epoch_Time:150.0min:\n",
      "[2020-10-22 17:55:12,616][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(1651/7043) loss:0.470 lr:0.0000500 epoch_Time:149.0min:\n",
      "[2020-10-22 17:56:35,553][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(1701/7043) loss:0.442 lr:0.0000500 epoch_Time:147.0min:\n",
      "[2020-10-22 17:57:58,528][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(1751/7043) loss:0.464 lr:0.0000500 epoch_Time:146.0min:\n",
      "[2020-10-22 17:59:21,493][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(1801/7043) loss:0.193 lr:0.0000500 epoch_Time:145.0min:\n",
      "[2020-10-22 18:00:44,465][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(1851/7043) loss:0.279 lr:0.0000500 epoch_Time:143.0min:\n",
      "[2020-10-22 18:02:07,449][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(1901/7043) loss:0.240 lr:0.0000500 epoch_Time:142.0min:\n",
      "[2020-10-22 18:03:30,374][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(1951/7043) loss:0.543 lr:0.0000500 epoch_Time:141.0min:\n",
      "[2020-10-22 18:04:53,367][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(2001/7043) loss:0.226 lr:0.0000500 epoch_Time:139.0min:\n",
      "[2020-10-22 18:06:16,321][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(2051/7043) loss:0.277 lr:0.0000500 epoch_Time:138.0min:\n",
      "[2020-10-22 18:07:39,279][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(2101/7043) loss:0.406 lr:0.0000500 epoch_Time:136.0min:\n",
      "[2020-10-22 18:09:02,225][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(2151/7043) loss:0.485 lr:0.0000500 epoch_Time:135.0min:\n",
      "[2020-10-22 18:10:25,111][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(2201/7043) loss:0.346 lr:0.0000500 epoch_Time:134.0min:\n",
      "[2020-10-22 18:11:48,020][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(2251/7043) loss:0.307 lr:0.0000500 epoch_Time:132.0min:\n",
      "[2020-10-22 18:13:10,960][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(2301/7043) loss:0.402 lr:0.0000500 epoch_Time:131.0min:\n",
      "[2020-10-22 18:14:33,924][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(2351/7043) loss:0.288 lr:0.0000500 epoch_Time:129.0min:\n",
      "[2020-10-22 18:15:56,896][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(2401/7043) loss:0.667 lr:0.0000500 epoch_Time:128.0min:\n",
      "[2020-10-22 18:17:19,883][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(2451/7043) loss:0.383 lr:0.0000500 epoch_Time:127.0min:\n",
      "[2020-10-22 18:18:42,853][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(2501/7043) loss:0.460 lr:0.0000500 epoch_Time:125.0min:\n",
      "[2020-10-22 18:20:05,741][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(2551/7043) loss:0.328 lr:0.0000500 epoch_Time:124.0min:\n",
      "[2020-10-22 18:21:28,666][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(2601/7043) loss:0.264 lr:0.0000500 epoch_Time:123.0min:\n",
      "[2020-10-22 18:22:51,601][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(2651/7043) loss:0.120 lr:0.0000500 epoch_Time:121.0min:\n",
      "[2020-10-22 18:24:14,567][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(2701/7043) loss:0.529 lr:0.0000500 epoch_Time:120.0min:\n",
      "[2020-10-22 18:25:37,498][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(2751/7043) loss:0.394 lr:0.0000500 epoch_Time:118.0min:\n",
      "[2020-10-22 18:27:00,434][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(2801/7043) loss:0.353 lr:0.0000500 epoch_Time:117.0min:\n",
      "[2020-10-22 18:28:23,389][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(2851/7043) loss:0.491 lr:0.0000500 epoch_Time:116.0min:\n",
      "[2020-10-22 18:29:46,301][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(2901/7043) loss:0.347 lr:0.0000500 epoch_Time:114.0min:\n",
      "[2020-10-22 18:31:09,244][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(2951/7043) loss:0.302 lr:0.0000500 epoch_Time:113.0min:\n",
      "[2020-10-22 18:32:32,169][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(3001/7043) loss:0.228 lr:0.0000500 epoch_Time:112.0min:\n",
      "[2020-10-22 18:33:55,130][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(3051/7043) loss:0.344 lr:0.0000500 epoch_Time:110.0min:\n",
      "[2020-10-22 18:35:18,047][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(3101/7043) loss:0.138 lr:0.0000500 epoch_Time:109.0min:\n",
      "[2020-10-22 18:36:40,989][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(3151/7043) loss:0.151 lr:0.0000500 epoch_Time:107.0min:\n",
      "[2020-10-22 18:38:03,925][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(3201/7043) loss:0.333 lr:0.0000500 epoch_Time:106.0min:\n",
      "[2020-10-22 18:39:26,914][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(3251/7043) loss:0.417 lr:0.0000500 epoch_Time:105.0min:\n",
      "[2020-10-22 18:40:49,849][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(3301/7043) loss:0.430 lr:0.0000500 epoch_Time:103.0min:\n",
      "[2020-10-22 18:42:12,781][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(3351/7043) loss:0.345 lr:0.0000500 epoch_Time:102.0min:\n",
      "[2020-10-22 18:43:35,727][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(3401/7043) loss:0.100 lr:0.0000500 epoch_Time:100.0min:\n",
      "[2020-10-22 18:44:58,625][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(3451/7043) loss:0.402 lr:0.0000500 epoch_Time:99.0min:\n",
      "[2020-10-22 18:46:21,569][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(3501/7043) loss:0.688 lr:0.0000500 epoch_Time:98.0min:\n",
      "[2020-10-22 18:47:44,505][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(3551/7043) loss:0.516 lr:0.0000500 epoch_Time:96.0min:\n",
      "[2020-10-22 18:49:07,467][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(3601/7043) loss:0.397 lr:0.0000500 epoch_Time:95.0min:\n",
      "[2020-10-22 18:50:30,421][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(3651/7043) loss:0.313 lr:0.0000500 epoch_Time:94.0min:\n",
      "[2020-10-22 18:51:53,401][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(3701/7043) loss:0.467 lr:0.0000500 epoch_Time:92.0min:\n",
      "[2020-10-22 18:53:16,375][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(3751/7043) loss:0.179 lr:0.0000500 epoch_Time:91.0min:\n",
      "[2020-10-22 18:54:39,335][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(3801/7043) loss:0.363 lr:0.0000500 epoch_Time:89.0min:\n",
      "[2020-10-22 18:56:02,274][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(3851/7043) loss:0.184 lr:0.0000500 epoch_Time:88.0min:\n",
      "[2020-10-22 18:57:25,198][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(3901/7043) loss:0.341 lr:0.0000500 epoch_Time:87.0min:\n",
      "[2020-10-22 18:58:48,188][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(3951/7043) loss:0.226 lr:0.0000500 epoch_Time:85.0min:\n",
      "[2020-10-22 19:00:11,193][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(4001/7043) loss:0.267 lr:0.0000500 epoch_Time:84.0min:\n",
      "[2020-10-22 19:01:34,163][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(4051/7043) loss:0.282 lr:0.0000500 epoch_Time:82.0min:\n",
      "[2020-10-22 19:02:57,151][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(4101/7043) loss:0.579 lr:0.0000500 epoch_Time:81.0min:\n",
      "[2020-10-22 19:04:20,149][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(4151/7043) loss:0.483 lr:0.0000500 epoch_Time:80.0min:\n",
      "[2020-10-22 19:05:43,078][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(4201/7043) loss:0.297 lr:0.0000500 epoch_Time:78.0min:\n",
      "[2020-10-22 19:07:06,009][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(4251/7043) loss:0.262 lr:0.0000500 epoch_Time:77.0min:\n",
      "[2020-10-22 19:08:28,969][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(4301/7043) loss:0.678 lr:0.0000500 epoch_Time:76.0min:\n",
      "[2020-10-22 19:09:51,894][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(4351/7043) loss:0.444 lr:0.0000500 epoch_Time:74.0min:\n",
      "[2020-10-22 19:11:14,851][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(4401/7043) loss:0.174 lr:0.0000500 epoch_Time:73.0min:\n",
      "[2020-10-22 19:12:37,775][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(4451/7043) loss:0.510 lr:0.0000500 epoch_Time:71.0min:\n",
      "[2020-10-22 19:14:00,701][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(4501/7043) loss:0.179 lr:0.0000500 epoch_Time:70.0min:\n",
      "[2020-10-22 19:15:23,629][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(4551/7043) loss:0.455 lr:0.0000500 epoch_Time:69.0min:\n",
      "[2020-10-22 19:16:46,605][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(4601/7043) loss:0.171 lr:0.0000500 epoch_Time:67.0min:\n",
      "[2020-10-22 19:18:09,572][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(4651/7043) loss:0.474 lr:0.0000500 epoch_Time:66.0min:\n",
      "[2020-10-22 19:19:32,495][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(4701/7043) loss:0.143 lr:0.0000500 epoch_Time:65.0min:\n",
      "[2020-10-22 19:20:55,527][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(4751/7043) loss:0.479 lr:0.0000500 epoch_Time:63.0min:\n",
      "[2020-10-22 19:22:18,479][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(4801/7043) loss:0.216 lr:0.0000500 epoch_Time:62.0min:\n",
      "[2020-10-22 19:23:41,422][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(4851/7043) loss:0.330 lr:0.0000500 epoch_Time:60.0min:\n",
      "[2020-10-22 19:25:04,387][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(4901/7043) loss:0.277 lr:0.0000500 epoch_Time:59.0min:\n",
      "[2020-10-22 19:26:27,362][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(4951/7043) loss:0.608 lr:0.0000500 epoch_Time:58.0min:\n",
      "[2020-10-22 19:27:50,342][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(5001/7043) loss:0.664 lr:0.0000500 epoch_Time:56.0min:\n",
      "[2020-10-22 19:29:13,343][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(5051/7043) loss:0.420 lr:0.0000500 epoch_Time:55.0min:\n",
      "[2020-10-22 19:30:36,307][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(5101/7043) loss:0.540 lr:0.0000500 epoch_Time:53.0min:\n",
      "[2020-10-22 19:31:59,266][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(5151/7043) loss:0.325 lr:0.0000500 epoch_Time:52.0min:\n",
      "[2020-10-22 19:33:22,254][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(5201/7043) loss:0.296 lr:0.0000500 epoch_Time:51.0min:\n",
      "[2020-10-22 19:34:45,198][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(5251/7043) loss:0.422 lr:0.0000500 epoch_Time:49.0min:\n",
      "[2020-10-22 19:36:08,123][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(5301/7043) loss:0.464 lr:0.0000500 epoch_Time:48.0min:\n",
      "[2020-10-22 19:37:31,066][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(5351/7043) loss:0.377 lr:0.0000500 epoch_Time:47.0min:\n",
      "[2020-10-22 19:38:54,024][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(5401/7043) loss:0.185 lr:0.0000500 epoch_Time:45.0min:\n",
      "[2020-10-22 19:40:16,979][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(5451/7043) loss:0.221 lr:0.0000500 epoch_Time:44.0min:\n",
      "[2020-10-22 19:41:39,945][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(5501/7043) loss:0.291 lr:0.0000500 epoch_Time:42.0min:\n",
      "[2020-10-22 19:43:02,804][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(5551/7043) loss:0.263 lr:0.0000500 epoch_Time:41.0min:\n",
      "[2020-10-22 19:44:25,718][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(5601/7043) loss:0.307 lr:0.0000500 epoch_Time:40.0min:\n",
      "[2020-10-22 19:45:48,689][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(5651/7043) loss:0.332 lr:0.0000500 epoch_Time:38.0min:\n",
      "[2020-10-22 19:47:11,635][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(5701/7043) loss:0.331 lr:0.0000500 epoch_Time:37.0min:\n",
      "[2020-10-22 19:48:34,597][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(5751/7043) loss:0.363 lr:0.0000500 epoch_Time:35.0min:\n",
      "[2020-10-22 19:49:57,600][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(5801/7043) loss:0.179 lr:0.0000500 epoch_Time:34.0min:\n",
      "[2020-10-22 19:51:20,585][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(5851/7043) loss:0.561 lr:0.0000500 epoch_Time:33.0min:\n",
      "[2020-10-22 19:52:43,580][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(5901/7043) loss:0.395 lr:0.0000500 epoch_Time:31.0min:\n",
      "[2020-10-22 19:54:06,557][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(5951/7043) loss:0.406 lr:0.0000500 epoch_Time:30.0min:\n",
      "[2020-10-22 19:55:29,498][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(6001/7043) loss:0.662 lr:0.0000500 epoch_Time:29.0min:\n",
      "[2020-10-22 19:56:52,475][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(6051/7043) loss:0.197 lr:0.0000500 epoch_Time:27.0min:\n",
      "[2020-10-22 19:58:15,479][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(6101/7043) loss:0.494 lr:0.0000500 epoch_Time:26.0min:\n",
      "[2020-10-22 19:59:38,410][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(6151/7043) loss:0.490 lr:0.0000500 epoch_Time:24.0min:\n",
      "[2020-10-22 20:01:01,379][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(6201/7043) loss:0.463 lr:0.0000500 epoch_Time:23.0min:\n",
      "[2020-10-22 20:02:24,378][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(6251/7043) loss:0.205 lr:0.0000500 epoch_Time:22.0min:\n",
      "[2020-10-22 20:03:47,304][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(6301/7043) loss:0.431 lr:0.0000500 epoch_Time:20.0min:\n",
      "[2020-10-22 20:05:10,268][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(6351/7043) loss:0.335 lr:0.0000500 epoch_Time:19.0min:\n",
      "[2020-10-22 20:06:33,232][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(6401/7043) loss:0.367 lr:0.0000500 epoch_Time:18.0min:\n",
      "[2020-10-22 20:07:56,157][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(6451/7043) loss:0.349 lr:0.0000500 epoch_Time:16.0min:\n",
      "[2020-10-22 20:09:19,121][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(6501/7043) loss:0.262 lr:0.0000500 epoch_Time:15.0min:\n",
      "[2020-10-22 20:10:42,125][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(6551/7043) loss:0.419 lr:0.0000500 epoch_Time:13.0min:\n",
      "[2020-10-22 20:12:05,208][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(6601/7043) loss:0.366 lr:0.0000500 epoch_Time:12.0min:\n",
      "[2020-10-22 20:13:28,146][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(6651/7043) loss:0.459 lr:0.0000500 epoch_Time:11.0min:\n",
      "[2020-10-22 20:14:51,096][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(6701/7043) loss:0.545 lr:0.0000500 epoch_Time:9.0min:\n",
      "[2020-10-22 20:16:14,079][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(6751/7043) loss:0.238 lr:0.0000500 epoch_Time:8.0min:\n",
      "[2020-10-22 20:17:37,045][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(6801/7043) loss:0.455 lr:0.0000500 epoch_Time:6.0min:\n",
      "[2020-10-22 20:19:00,014][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(6851/7043) loss:0.593 lr:0.0000500 epoch_Time:5.0min:\n",
      "[2020-10-22 20:20:22,977][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(6901/7043) loss:0.400 lr:0.0000500 epoch_Time:4.0min:\n",
      "[2020-10-22 20:21:45,947][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(6951/7043) loss:0.299 lr:0.0000500 epoch_Time:2.0min:\n",
      "[2020-10-22 20:23:08,927][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(7001/7043) loss:0.318 lr:0.0000500 epoch_Time:1.0min:\n",
      "[2020-10-22 20:24:17,794][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:2(7043/7043) loss:0.487 lr:0.0000500 epoch_Time:0.0min:\n",
      "[2020-10-22 20:28:34,581][<ipython-input-6-4a7b177564e6>][INFO] 各类单独F1:{'sunny': 0.8552421500798296, 'cloudy': 0.7365939893930465, 'others': 0.856002816570825}  各类F1取平均:0.8159463186812337\n",
      "[2020-10-22 20:28:34,583][<ipython-input-6-4a7b177564e6>][INFO] val_size: 10564  valLoss: 0.4085 valAcc: 0.8270\n",
      "[2020-10-22 20:28:34,591][<ipython-input-6-4a7b177564e6>][INFO] Epoch:[2/30] train_acc=0.843 \n",
      "[2020-10-22 20:28:38,436][<ipython-input-6-4a7b177564e6>][INFO] learning rate:5e-05\n",
      "[2020-10-22 20:28:38,437][<ipython-input-6-4a7b177564e6>][INFO] Epoch 3/30\n",
      "[2020-10-22 20:28:38,437][<ipython-input-6-4a7b177564e6>][INFO] ----------\n",
      "[2020-10-22 20:28:38,438][<ipython-input-6-4a7b177564e6>][INFO] Learning Rate is 0.00003\n",
      "[2020-10-22 20:28:40,806][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:3(1/7043) loss:0.220 lr:0.0000250 epoch_Time:276.0min:\n",
      "[2020-10-22 20:30:03,583][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:3(51/7043) loss:0.289 lr:0.0000250 epoch_Time:194.0min:\n",
      "[2020-10-22 20:31:26,484][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:3(101/7043) loss:0.212 lr:0.0000250 epoch_Time:193.0min:\n",
      "[2020-10-22 20:32:49,421][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:3(151/7043) loss:0.415 lr:0.0000250 epoch_Time:191.0min:\n",
      "[2020-10-22 20:34:12,350][<ipython-input-6-4a7b177564e6>][INFO]  Epoch:3(201/7043) loss:0.425 lr:0.0000250 epoch_Time:189.0min:\n"
     ]
    }
   ],
   "source": [
    "#\n",
    "import timm\n",
    "from pytorch_toolbelt import losses as L\n",
    "def train_model(model,criterion, optimizer, lr_scheduler,arc_model=None):\n",
    "\n",
    "    train_dataset = wheatherDataset(opt.train_val_data, opt.train_list, phase='train', input_size=opt.input_size)\n",
    "    trainloader = DataLoader(train_dataset,\n",
    "                             batch_size=opt.train_batch_size,\n",
    "                             shuffle=True,\n",
    "                             num_workers=opt.num_workers)\n",
    "\n",
    "    total_iters=len(trainloader)\n",
    "    logger.info('total_iters:{}'.format(total_iters))\n",
    "    model_name=opt.backbone\n",
    "    train_loss = []\n",
    "    since = time.time()\n",
    "    best_model_wts = model.state_dict()\n",
    "    best_score = 0.0\n",
    "    model.train(True)\n",
    "    logger.info('start training...')\n",
    "    #\n",
    "    iters = len(trainloader)\n",
    "    for epoch in range(1,opt.max_epoch+1):\n",
    "        begin_time=time.time()\n",
    "        logger.info('learning rate:{}'.format(optimizer.param_groups[-1]['lr']))\n",
    "        logger.info('Epoch {}/{}'.format(epoch, opt.max_epoch))\n",
    "        logger.info('-' * 10)\n",
    "        optimizer = lr_scheduler(optimizer, epoch)\n",
    "        running_loss = 0.0\n",
    "        running_corrects_linear = 0\n",
    "        running_corrects_arc=0\n",
    "        count=0\n",
    "        best_epoch=0\n",
    "        for i, data in enumerate(trainloader):\n",
    "            count+=1\n",
    "            inputs, labels = data\n",
    "            labels = labels.type(torch.LongTensor)\n",
    "            inputs, labels = inputs.cuda(), labels.cuda()\n",
    "            #\n",
    "            out_linear= model(inputs)\n",
    "            _, linear_preds = torch.max(out_linear.data, 1)\n",
    "            loss = criterion(out_linear, labels)\n",
    "            #\n",
    "            optimizer.zero_grad()\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "\n",
    "            if i % opt.print_interval == 0 or out_linear.size()[0] < opt.train_batch_size:\n",
    "                spend_time = time.time() - begin_time\n",
    "                logger.info(\n",
    "                    ' Epoch:{}({}/{}) loss:{:.3f} lr:{:.7f} epoch_Time:{}min:'.format(\n",
    "                        epoch, count, total_iters,\n",
    "                        loss.item(), optimizer.param_groups[-1]['lr'],\n",
    "                        spend_time / count * total_iters // 60 - spend_time // 60))\n",
    "                train_loss.append(loss.item())\n",
    "            running_corrects_linear += torch.sum(linear_preds == labels.data)\n",
    "            #\n",
    "        weight_score = val_model(model, criterion)\n",
    "        epoch_acc_linear = running_corrects_linear.double() / total_iters / opt.train_batch_size\n",
    "        logger.info('Epoch:[{}/{}] train_acc={:.3f} '.format(epoch, opt.max_epoch,\n",
    "                                                                    epoch_acc_linear))\n",
    "        save_dir = os.path.join(opt.checkpoints_dir, model_name)\n",
    "        if not os.path.exists(save_dir):\n",
    "            os.makedirs(save_dir)\n",
    "        model_out_path = save_dir + \"/\" + '{}_'.format(model_name) + str(epoch) + '.pth'\n",
    "        best_model_out_path = save_dir + \"/\" + '{}_'.format(model_name) + 'best' + '.pth'\n",
    "        #save the best model\n",
    "        if weight_score > best_score:\n",
    "            best_score = weight_score\n",
    "            best_epoch=epoch\n",
    "            torch.save(model.state_dict(), best_model_out_path)\n",
    "        #save based on epoch interval\n",
    "        if epoch % opt.save_interval == 0 and epoch>opt.min_save_epoch:\n",
    "            torch.save(model.state_dict(), model_out_path)\n",
    "    #\n",
    "    #optimizer.swap_swa_sgd\n",
    "    #\n",
    "    logger.info('Best WeightF1: {:.3f} Best epoch:{}'.format(best_score,best_epoch))\n",
    "    time_elapsed = time.time() - since\n",
    "    logger.info('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))\n",
    "\n",
    "\n",
    "def val_model(model, criterion):\n",
    "    val_dataset = wheatherDataset(opt.train_val_data, opt.val_list, phase='val', input_size=opt.input_size)\n",
    "    val_loader = DataLoader(val_dataset,\n",
    "                             batch_size=opt.val_batch_size,\n",
    "                             shuffle=False,\n",
    "                             num_workers=opt.num_workers)\n",
    "    dset_sizes=len(val_dataset)\n",
    "    model.eval()\n",
    "    running_loss = 0.0\n",
    "    running_corrects = 0\n",
    "    cont = 0\n",
    "    outPre = []\n",
    "    outLabel = []\n",
    "    pres_list=[]\n",
    "    labels_list=[]\n",
    "    for data in val_loader:\n",
    "        inputs, labels = data\n",
    "        labels = labels.type(torch.LongTensor)\n",
    "        inputs, labels = inputs.cuda(), labels.cuda()\n",
    "        outputs = model(inputs)\n",
    "        _, preds = torch.max(outputs.data, 1)\n",
    "        loss = criterion(outputs, labels)\n",
    "        if cont == 0:\n",
    "            outPre = outputs.data.cpu()\n",
    "            outLabel = labels.data.cpu()\n",
    "        else:\n",
    "            outPre = torch.cat((outPre, outputs.data.cpu()), 0)\n",
    "            outLabel = torch.cat((outLabel, labels.data.cpu()), 0)\n",
    "        pres_list+=preds.cpu().numpy().tolist()\n",
    "        labels_list+=labels.data.cpu().numpy().tolist()\n",
    "        running_loss += loss.item() * inputs.size(0)\n",
    "        running_corrects += torch.sum(preds == labels.data)\n",
    "        cont += 1\n",
    "    _,_, f_class, _= precision_recall_fscore_support(y_true=labels_list, y_pred=pres_list,labels=[0, 1, 2],\n",
    "     average=None)                                                                   \n",
    "    fper_class = {'sunny': f_class[0], 'cloudy': f_class[1], 'others': f_class[2]}\n",
    "    logger.info('各类单独F1:{}  各类F1取平均:{}'.format(fper_class, f_class.mean()))\n",
    "    logger.info('val_size: {}  valLoss: {:.4f} valAcc: {:.4f}'.format(dset_sizes, running_loss / dset_sizes, running_corrects.double() / dset_sizes))\n",
    "    val_acc = accuracy_score(labels_list, pres_list)    \n",
    "    return val_acc\n",
    "\n",
    "\n",
    "def exp_lr_scheduler(optimizer, epoch):\n",
    "    #lr = opt.lr * (0.8**(epoch / opt.lr_decay_epoch))\n",
    "    lr = opt.lr * 0.5**(epoch-1)\n",
    "    logger.info('Learning Rate is {:.5f}'.format(lr))\n",
    "    for param_group in optimizer.param_groups:\n",
    "        param_group['lr'] = lr\n",
    "    return optimizer\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    #\n",
    "    opt = Config()\n",
    "    torch.cuda.empty_cache()\n",
    "    device = torch.device(opt.device)\n",
    "    #if opt.loss == 'focal_loss':\n",
    "        #criterion = FocalLoss(gamma=2)\n",
    "    #else:\n",
    "    #weight_class=torch.FloatTensor([0.34, 0.24, 0.42])\n",
    "    #criterion = torch.nn.CrossEntropyLoss(weight=weight_class).cuda()\n",
    "    criterion = torch.nn.CrossEntropyLoss().cuda()\n",
    "    use_smooth_label=False\n",
    "    if use_smooth_label:\n",
    "        criterion = L.JointLoss(first=torch.nn.CrossEntropyLoss(weight=weight_class), second=LabelSmoothSoftmaxCE(),\n",
    "                              first_weight=0.5, second_weight=0.5).cuda()\n",
    "    #\n",
    "    model_name =opt.backbone\n",
    "    logger = get_logger(os.path.join(opt.checkpoints_dir , model_name+'.log'))\n",
    "    logger.info('Using: {}'.format(model_name))\n",
    "    logger.info('InputSize: {}'.format(opt.input_size))\n",
    "    logger.info('optimizer: {}'.format(opt.optimizer))\n",
    "    logger.info('lr_init: {}'.format(opt.lr))\n",
    "    logger.info('criterion: {}'.format(opt.loss))\n",
    "    logger.info('Using the GPU: {}'.format(str(opt.gpu_id)))\n",
    "\n",
    "    #model  = make_model('{}'.format(model_name), num_classes=3,\n",
    "    #                    pretrained=True)\n",
    "    model = timm.create_model('swsl_resnext101_32x8d', pretrained=True)\n",
    "    num_ftrs = model.fc.in_features\n",
    "    model.fc = nn.Linear(num_ftrs, 3)\n",
    "    model.to(device)\n",
    "    model = nn.DataParallel(model)\n",
    "    #pretrained\n",
    "    net_weight = torch.load('/home/kesci/work/checkpoints/wsl_83/wsl_1_83.pth')\n",
    "    #net_weight = torch.load('/home/kesci/work/checkpoints/wsl_84/wsl_1_84.pth')\n",
    "    model.load_state_dict(net_weight)\n",
    "    if opt.optimizer == 'sgd':\n",
    "        optimizer = optim.SGD((model.parameters()), lr=opt.lr, momentum=opt.MOMENTUM, weight_decay=0.0004)\n",
    "    else:\n",
    "        optimizer = optim.Adam(model.parameters(), lr=opt.lr)\n",
    "\n",
    "    train_model(model, criterion, optimizer,\n",
    "              lr_scheduler=exp_lr_scheduler)\n",
    "\n",
    "    torch.cuda.empty_cache()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "2B2088F3DFAB42F88BBF25E11E57BDFD",
    "jupyter": {},
    "notebookId": "5f892607bfe3ac0015e354b2",
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "1+1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "id": "E5A60116578B446A84C359EB58497F2D",
    "jupyter": {},
    "notebookId": "5f892607bfe3ac0015e354b2",
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "测试数据量:72778,batch数:2274\n",
      "download model finished....\n",
      "0\n",
      "100\n",
      "200\n",
      "300\n",
      "400\n",
      "500\n",
      "600\n",
      "700\n",
      "800\n",
      "900\n",
      "1000\n",
      "1100\n",
      "1200\n",
      "1300\n",
      "1400\n",
      "1500\n",
      "1600\n",
      "1700\n",
      "1800\n",
      "1900\n",
      "2000\n",
      "2100\n",
      "2200\n"
     ]
    }
   ],
   "source": [
    "#infer\n",
    "import tqdm\n",
    "import timm\n",
    "class wheatherDatasetInfer(Dataset):\n",
    "\n",
    "    def __init__(self,data_dir, input_size=300,transform=None):\n",
    "        self.image_paths = sorted(glob.glob(data_dir+'/*'))\n",
    "        self.transforms = T.Compose([\n",
    "                T.Resize((input_size,input_size)),\n",
    "                T.ToTensor(),\n",
    "                T.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n",
    "            ])\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        sample_path = self.image_paths[index]\n",
    "        data = Image.open(sample_path)\n",
    "        data = data.convert('RGB')\n",
    "        data = self.transforms(data)\n",
    "        return data.float(),sample_path\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.image_paths)\n",
    "@torch.no_grad()\n",
    "def infer_wheather(model):\n",
    "    pres_dic = {'id':[],'weather':[]}\n",
    "    cnt = 0\n",
    "    for data in dataset_loaders:\n",
    "        if cnt % 100 == 0:\n",
    "            print(cnt)\n",
    "        cnt += 1\n",
    "        inputs, image_paths = data\n",
    "        inputs = inputs.cuda()\n",
    "        outputs = model(inputs)\n",
    "        #out_prob = F.softmax(outputs)[0]\n",
    "        _, preds = torch.max(outputs.data, 1)\n",
    "        cls = preds.cpu().numpy().tolist()\n",
    "        image_names=[per.split('/')[-1] for per in image_paths]\n",
    "        #print(image_names)\n",
    "        pres_dic['id']+=image_names\n",
    "        pres_dic['weather']+=cls\n",
    "    #\n",
    "    df_sub=pd.DataFrame(pres_dic)\n",
    "    df_sub.to_csv(opt.submit_csv_dir+'10_23_821.csv',index=False)\n",
    "\n",
    "\n",
    "\n",
    "if __name__ == '__main__':\n",
    "    opt = Config()\n",
    "    # some parameters\n",
    "    use_gpu = torch.cuda.is_available()\n",
    "    os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n",
    "\n",
    "    image_datasets = wheatherDatasetInfer(opt.test_dir,opt.input_size)\n",
    "    #\n",
    "    dataset_loaders = torch.utils.data.DataLoader(image_datasets,\n",
    "                                                  batch_size=32,\n",
    "                                                  shuffle=False, num_workers=4)\n",
    "    data_set_sizes = len(image_datasets)\n",
    "    print('测试数据量:{},batch数:{}'.format(data_set_sizes,data_set_sizes//32))\n",
    "    #\n",
    "    #model = make_model('{}'.format(opt.backbone), num_classes=3,\n",
    "                         #pretrained=False)\n",
    "    model = timm.create_model('swsl_resnext101_32x8d', pretrained=False)\n",
    "    num_ftrs = model.fc.in_features\n",
    "    model.fc = nn.Linear(num_ftrs, 3)\n",
    "    #\n",
    "    device = torch.device(opt.device)\n",
    "    model.to(device)\n",
    "    model = nn.DataParallel(model)\n",
    "    net_weight = torch.load('/home/kesci/work/checkpoints/wsl/wsl_1.pth')\n",
    "    model.load_state_dict(net_weight)\n",
    "    model.eval()\n",
    "    print('download model finished....')\n",
    "    #\n",
    "    infer_wheather(model)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "id": "982EA855815A484883E5E2CA5C1E84DE",
    "jupyter": {},
    "notebookId": "5f8aa02cbfe3ac0015eaaffd",
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "测试数据量:72778,batch数:2274\n",
      "download model1 finished....\n",
      "download model2 finished....\n",
      "download model3 finished....\n",
      "0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "<ipython-input-8-8aa2ef523ef4>:39: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n",
      "  all_outs.append(F.softmax(outputs))\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "100\n",
      "200\n",
      "300\n",
      "400\n",
      "500\n",
      "600\n",
      "700\n",
      "800\n",
      "900\n",
      "1000\n",
      "1100\n",
      "1200\n",
      "1300\n",
      "1400\n",
      "1500\n",
      "1600\n",
      "1700\n",
      "1800\n",
      "1900\n",
      "2000\n",
      "2100\n",
      "2200\n"
     ]
    }
   ],
   "source": [
    "#infer 三模融合\n",
    "import timm\n",
    "from torch.nn import functional as F\n",
    "class wheatherDatasetInfer(Dataset):\n",
    "\n",
    "    def __init__(self,data_dir, input_size=300,transform=None):\n",
    "        self.image_paths = sorted(glob.glob(data_dir+'/*'))\n",
    "        self.transforms = T.Compose([\n",
    "                T.Resize((input_size,input_size)),\n",
    "                T.ToTensor(),\n",
    "                T.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n",
    "            ])\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        sample_path = self.image_paths[index]\n",
    "        data = Image.open(sample_path)\n",
    "        data = data.convert('RGB')\n",
    "        data = self.transforms(data)\n",
    "        return data.float(),sample_path\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.image_paths)\n",
    "@torch.no_grad()\n",
    "def infer_wheather(model_1,model_2,model_3):\n",
    "    pres_dic = {'id':[],'weather':[]}\n",
    "    cnt = 0\n",
    "    all_pathes=[]\n",
    "    all_outs=[]\n",
    "    for data in dataset_loaders:\n",
    "        if cnt % 100 == 0:\n",
    "            print(cnt)\n",
    "        cnt += 1\n",
    "        inputs, image_paths = data\n",
    "        inputs = inputs.cuda()\n",
    "        outputs1 = model_1(inputs)\n",
    "        outputs2 = model_2(inputs)\n",
    "        outputs3 = model_3(inputs)\n",
    "        outputs=(outputs1+outputs2+outputs3)/3#算数平均\n",
    "        all_outs.append(F.softmax(outputs))\n",
    "        #outputs_geom=torch.pow(outputs1*outputs2*outputs3,1/3)#几何平均\n",
    "        _, preds = torch.max(outputs.data, 1)\n",
    "        cls = preds.cpu().numpy().tolist()\n",
    "        image_names=[per.split('/')[-1] for per in image_paths]\n",
    "        #print(image_names)\n",
    "        pres_dic['id']+=image_names\n",
    "        pres_dic['weather']+=cls\n",
    "        all_pathes.append(image_names)\n",
    "    #\n",
    "    df_sub=pd.DataFrame(pres_dic)\n",
    "    #df_sub.to_csv(opt.submit_csv_dir+'10_20v2Mean3.csv',index=False)\n",
    "    return all_pathes,all_outs\n",
    "\n",
    "\n",
    "if __name__ == '__main__':\n",
    "    opt = Config()\n",
    "    # some parameters\n",
    "    use_gpu = torch.cuda.is_available()\n",
    "    os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n",
    "\n",
    "    image_datasets = wheatherDatasetInfer(opt.test_dir,opt.input_size)\n",
    "    #\n",
    "    dataset_loaders = torch.utils.data.DataLoader(image_datasets,\n",
    "                                                  batch_size=32,\n",
    "                                                  shuffle=False, num_workers=4)\n",
    "    data_set_sizes = len(image_datasets)\n",
    "    print('测试数据量:{},batch数:{}'.format(data_set_sizes,data_set_sizes//32))\n",
    "    #\n",
    "    #model = make_model('{}'.format(opt.backbone), num_classes=3,\n",
    "                         #pretrained=False)\n",
    "    device = torch.device(opt.device)\n",
    "    model_1 = timm.create_model('swsl_resnext101_32x8d', pretrained=False)\n",
    "    num_ftrs = model_1.fc.in_features\n",
    "    model_1.fc = nn.Linear(num_ftrs, 3)\n",
    "    #\n",
    "    model_1.to(device)\n",
    "    model_1 = nn.DataParallel(model_1)\n",
    "    net_weight_1 = torch.load('/home/kesci/work/checkpoints/wsl_84/wsl_1_84.pth')\n",
    "    model_1.load_state_dict(net_weight_1)\n",
    "    model_1.eval()\n",
    "    print('download model1 finished....')\n",
    "    #\n",
    "    model_2 = timm.create_model('swsl_resnext101_32x8d', pretrained=False)\n",
    "    num_ftrs = model_2.fc.in_features\n",
    "    model_2.fc = nn.Linear(num_ftrs, 3)\n",
    "    model_2.to(device)\n",
    "    model_2 = nn.DataParallel(model_2)\n",
    "    net_weight_2 = torch.load('/home/kesci/work/checkpoints/wsl_84244/wsl_1.pth')\n",
    "    model_2.load_state_dict(net_weight_2)\n",
    "    model_2.eval()\n",
    "    print('download model2 finished....')\n",
    "    #\n",
    "    #\n",
    "    model_3 = timm.create_model('swsl_resnext101_32x8d', pretrained=False)\n",
    "    num_ftrs = model_3.fc.in_features\n",
    "    model_3.fc = nn.Linear(num_ftrs, 3)\n",
    "    model_3.to(device)\n",
    "    model_3 = nn.DataParallel(model_3)\n",
    "    net_weight_3 = torch.load('/home/kesci/work/checkpoints/wsl_84_v1/wsl_1.pth')\n",
    "    model_3.load_state_dict(net_weight_3)\n",
    "    model_3.eval()\n",
    "    print('download model3 finished....')\n",
    "    all_pathes,all_outs=infer_wheather(model_1,model_2,model_3)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "id": "BC9C4FB3D5F84AB695C8D34A56521658",
    "jupyter": {},
    "notebookId": "5f8aa02cbfe3ac0015eaaffd",
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": []
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "52797"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {
    "id": "0AC000EDCA7845738CCC5B20E89E0255",
    "jupyter": {},
    "notebookId": "5f8aa02cbfe3ac0015eaaffd",
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "'''\n",
    "reserve_list=[]#按照概率进行伪标签采样\n",
    "cnt_pseudo_df={'image_name':[],'pro':[]}\n",
    "for i in range(len(all_pathes)):\n",
    "    out_pre=all_outs[i]\n",
    "    max_pro, preds = torch.max(out_pre, 1)\n",
    "    max_pro_list = max_pro.cpu().numpy().tolist()\n",
    "    for jj in range(len(max_pro_list)):\n",
    "        #print(max_pro_list[jj])\n",
    "        cnt_pseudo_df['pro'].append(max_pro_list[jj])\n",
    "        cnt_pseudo_df['image_name'].append(all_pathes[i][jj])\n",
    "        if max_pro_list[jj]>0.8:\n",
    "            reserve_list.append(all_pathes[i][jj])\n",
    "len(reserve_list)\n",
    "import pandas as pd\n",
    "cnt_pseudo_df=pd.DataFrame(cnt_pseudo_df)\n",
    "cnt_pseudo_df.to_csv('/home/kesci/work/submit/cnt_pseudo_df.csv',index=False)\n",
    "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "id": "48FCC81478D14BE997FD31F028E0298E",
    "jupyter": {},
    "notebookId": "5f8aa02cbfe3ac0015eaaffd",
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "#3模融合(投票)\n",
    "\n",
    "result_1=pd.read_csv('/home/kesci/work/submit/10_20v1_844.csv')\n",
    "result_2=pd.read_csv('/home/kesci/work/submit/10_20v2_845.csv')\n",
    "result_3=pd.read_csv('/home/kesci/work/submit/10_18v1.csv')\n",
    "result_4=pd.read_csv('/home/kesci/work/submit/10_20v2Mean3.csv')\n",
    "sub_1=result_1['weather'].values\n",
    "sub_2=result_2['weather'].values\n",
    "sub_3=result_3['weather'].values\n",
    "sub_4=result_4['weather'].values\n",
    "sub_5=result_5['weather'].values\n",
    "result_list=[]\n",
    "for i in range(len(sub_1)):\n",
    "    tmp = {0: 0, 1: 0, 2: 0}\n",
    "    votes=[sub_1[i],sub_2[i],sub_3[i],sub_4[i],sub_4[i],sub_4[i]]\n",
    "    for k in votes:\n",
    "        tmp[k] += 1\n",
    "    #\n",
    "    most = sorted(tmp.items(), key=lambda item: item[1])[-1][0]\n",
    "    result_list.append(most)\n",
    "#\n",
    "result=result_1.copy()\n",
    "result['weather']=result_list\n",
    "result.to_csv('/home/kesci/work/submit/10_22voteV2.csv',index=False)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "id": "4A17D6AD2B894C0985BBCFCF5BD844EF",
    "jupyter": {},
    "notebookId": "5f8aa02cbfe3ac0015eaaffd",
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "pseudo len: 52797\n",
      "伪标签制作完成 52797\n"
     ]
    }
   ],
   "source": [
    "reserve_list=[]\n",
    "df_tmp=pd.read_csv('/home/kesci/work/submit/cnt_pseudo_df.csv')\n",
    "for index,img_name,pro in df_tmp.itertuples():\n",
    "    if pro>0.8:\n",
    "        reserve_list.append(img_name)\n",
    "#\n",
    "#pseudo-label 利用线上8456的模型做一个伪标签\n",
    "best_true=pd.read_csv('/home/kesci/work/submit/10_20voteV2_8456.csv')\n",
    "train_txt='/home/kesci/work/dataset/train_pseudo.txt'\n",
    "w_contine=open(train_txt,'a')#追加测试集的标签\n",
    "\n",
    "print('pseudo len:',len(reserve_list))\n",
    "cnt=0\n",
    "for index,img_name,value in best_true.itertuples():\n",
    "    if img_name in reserve_list:\n",
    "        w_contine.write('test_set/'+img_name+','+str(value)+'\\n')\n",
    "        cnt+=1\n",
    "    #print(img_name,value)\n",
    "w_contine.close()\n",
    "print('伪标签制作完成',cnt)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "id": "3769C25FDFF940B4BE6206ADA460AF70",
    "jupyter": {},
    "notebookId": "5f8aa02cbfe3ac0015eaaffd",
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": []
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'\\nimport tqdm\\nimport timm\\nclass wheatherDatasetInfer(Dataset):\\n\\n    def __init__(self,data_dir, input_size=300,modes=0):\\n        self.image_paths = sorted(glob.glob(data_dir+\\'/*\\'))\\n\\n        \\n        self.transforms= T.Compose([\\n                    T.Resize((input_size,input_size)),\\n                    T.RandomHorizontalFlip(p=1),\\n                    T.ToTensor(),#T.RandomHorizontalFlip(p=1),\\n                    T.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\\n                ])\\n\\n    def __getitem__(self, index):\\n        sample_path = self.image_paths[index]\\n        data = Image.open(sample_path)\\n        data = data.convert(\\'RGB\\')\\n        data = self.transforms(data)\\n        return data.float(),sample_path\\n\\n    def __len__(self):\\n        return len(self.image_paths)\\n#infer\\nimport tqdm\\nimport timm\\n@torch.no_grad()\\ndef infer_wheather(model):\\n    pres_dic = {\\'id\\':[],\\'weather\\':[]}\\n    cnt = 0\\n    all_output=[]\\n    all_pathes=[]\\n    for data in dataset_loaders:\\n        if cnt % 100 == 0:\\n            print(cnt)\\n        cnt += 1\\n        inputs, image_paths = data\\n        inputs = inputs.cuda()\\n        outputs = model(inputs)\\n        all_output.append(outputs)\\n        image_names=[per.split(\\'/\\')[-1] for per in image_paths]\\n        all_pathes.append(image_names)\\n    #\\n    return all_pathes,all_output\\n\\n\\n\\nif __name__ == \\'__main__\\':\\n    opt = Config()\\n    # some parameters\\n    use_gpu = torch.cuda.is_available()\\n    os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\\n\\n    image_datasets = wheatherDatasetInfer(opt.test_dir,opt.input_size)\\n    #\\n    dataset_loaders = torch.utils.data.DataLoader(image_datasets,\\n                                                  batch_size=48,\\n                                                  shuffle=False, num_workers=8)\\n    data_set_sizes = len(image_datasets)\\n    
print(\\'测试数据量:{},batch数:{}\\'.format(data_set_sizes,data_set_sizes//48))\\n    #\\n    #model = make_model(\\'{}\\'.format(opt.backbone), num_classes=3,\\n                         #pretrained=False)\\n    model = timm.create_model(\\'swsl_resnext101_32x8d\\', pretrained=False)\\n    num_ftrs = model.fc.in_features\\n    model.fc = nn.Linear(num_ftrs, 3)\\n    #\\n    device = torch.device(opt.device)\\n    model.to(device)\\n    model = nn.DataParallel(model)\\n    net_weight = torch.load(\\'/home/kesci/work/checkpoints/wsl/wsl_1.pth\\')\\n    model.load_state_dict(net_weight)\\n    model.eval()\\n    print(\\'download model finished....\\')\\n    #\\n    all_pathes_1_1,all_output_2_1=infer_wheather(model)\\n'"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#infer\n",
    "#尝试TTA\n",
    "'''\n",
    "import tqdm\n",
    "import timm\n",
    "class wheatherDatasetInfer(Dataset):\n",
    "\n",
    "    def __init__(self,data_dir, input_size=300,modes=0):\n",
    "        self.image_paths = sorted(glob.glob(data_dir+'/*'))\n",
    "\n",
    "        \n",
    "        self.transforms= T.Compose([\n",
    "                    T.Resize((input_size,input_size)),\n",
    "                    T.RandomHorizontalFlip(p=1),\n",
    "                    T.ToTensor(),#T.RandomHorizontalFlip(p=1),\n",
    "                    T.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n",
    "                ])\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        sample_path = self.image_paths[index]\n",
    "        data = Image.open(sample_path)\n",
    "        data = data.convert('RGB')\n",
    "        data = self.transforms(data)\n",
    "        return data.float(),sample_path\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.image_paths)\n",
    "#infer\n",
    "import tqdm\n",
    "import timm\n",
    "@torch.no_grad()\n",
    "def infer_wheather(model):\n",
    "    pres_dic = {'id':[],'weather':[]}\n",
    "    cnt = 0\n",
    "    all_output=[]\n",
    "    all_pathes=[]\n",
    "    for data in dataset_loaders:\n",
    "        if cnt % 100 == 0:\n",
    "            print(cnt)\n",
    "        cnt += 1\n",
    "        inputs, image_paths = data\n",
    "        inputs = inputs.cuda()\n",
    "        outputs = model(inputs)\n",
    "        all_output.append(outputs)\n",
    "        image_names=[per.split('/')[-1] for per in image_paths]\n",
    "        all_pathes.append(image_names)\n",
    "    #\n",
    "    return all_pathes,all_output\n",
    "\n",
    "\n",
    "\n",
    "if __name__ == '__main__':\n",
    "    opt = Config()\n",
    "    # some parameters\n",
    "    use_gpu = torch.cuda.is_available()\n",
    "    os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n",
    "\n",
    "    image_datasets = wheatherDatasetInfer(opt.test_dir,opt.input_size)\n",
    "    #\n",
    "    dataset_loaders = torch.utils.data.DataLoader(image_datasets,\n",
    "                                                  batch_size=48,\n",
    "                                                  shuffle=False, num_workers=8)\n",
    "    data_set_sizes = len(image_datasets)\n",
    "    print('测试数据量:{},batch数:{}'.format(data_set_sizes,data_set_sizes//48))\n",
    "    #\n",
    "    #model = make_model('{}'.format(opt.backbone), num_classes=3,\n",
    "                         #pretrained=False)\n",
    "    model = timm.create_model('swsl_resnext101_32x8d', pretrained=False)\n",
    "    num_ftrs = model.fc.in_features\n",
    "    model.fc = nn.Linear(num_ftrs, 3)\n",
    "    #\n",
    "    device = torch.device(opt.device)\n",
    "    model.to(device)\n",
    "    model = nn.DataParallel(model)\n",
    "    net_weight = torch.load('/home/kesci/work/checkpoints/wsl/wsl_1.pth')\n",
    "    model.load_state_dict(net_weight)\n",
    "    model.eval()\n",
    "    print('download model finished....')\n",
    "    #\n",
    "    all_pathes_1_1,all_output_2_1=infer_wheather(model)\n",
    "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "id": "A823E5258561495989F2ADE505764B6F",
    "jupyter": {},
    "notebookId": "5f892607bfe3ac0015e354b2",
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "# NOTE(review): disabled cell — depends on all_output_2 / all_pathes_1, which are not defined in this notebook section;\n",
    "# presumably produced by an earlier (non-flipped) inference run. Verify they exist before re-enabling.\n",
    "'''\n",
    "pres_dic = {'id':[],'weather':[]}\n",
    "for i in range(len(all_output_2_1)):\n",
    "    outputs=torch.pow(all_output_2[i]*all_output_2_1[i],1/2)\n",
    "    _, preds = torch.max(outputs.data, 1)\n",
    "    cls = preds.cpu().numpy().tolist()\n",
    "    image_names=all_pathes_1[i]\n",
    "    #print(image_names)\n",
    "    pres_dic['id']+=image_names\n",
    "    pres_dic['weather']+=cls\n",
    "    #\n",
    "df_sub=pd.DataFrame(pres_dic)\n",
    "df_sub.to_csv(opt.submit_csv_dir+'10_18TTA.csv',index=False)\n",
    "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "id": "E4D5853131EF4B02969F5D70DE520DB5",
    "jupyter": {},
    "notebookId": "5f892607bfe3ac0015e354b2",
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2020-10-22 13:28:24 URL:https://cdn.kesci.com/submit_tool/v4/kesci_submit [7357446/7357446] -> \"kesci_submit\" [1]\r\n"
     ]
    }
   ],
   "source": [
    "!wget -nv -O kesci_submit https://cdn.kesci.com/submit_tool/v4/kesci_submit && chmod +x kesci_submit"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "id": "2E55B5A11EDE44A985C92FCB7C014BE8",
    "jupyter": {},
    "notebookId": "5f892607bfe3ac0015e354b2",
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Kesci Submit Tool 4.0.0\n",
      "\n",
      "> 已验证Token\n",
      "> 提交文件 /home/kesci/work/submit/10_20v2Mean3.csv (1204.74 KiB), Target Qiniu\n",
      "> 已上传 100 %\n",
      "> 文件已上传        \n",
      "=========================\n",
      "> 错误: 提交失败\n",
      "> 服务器响应 400 {\"message\":\"今日提交次数达到上限\"}\n"
     ]
    }
   ],
   "source": [
    "!./kesci_submit -token $KESCI_TOKEN -file /home/kesci/work/submit/10_20v2Mean3.csv"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {
    "id": "D604519AC42644F799778ED6040448DB",
    "jupyter": {},
    "notebookId": "5f892607bfe3ac0015e354b2",
    "scrolled": false,
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "--2020-10-17 03:12:07--  https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x8-b4712904.pth\n",
      "Resolving dl.fbaipublicfiles.com (dl.fbaipublicfiles.com)... 104.22.74.142, 104.22.75.142, 172.67.9.4, ...\n",
      "Connecting to dl.fbaipublicfiles.com (dl.fbaipublicfiles.com)|104.22.74.142|:443... connected.\n",
      "HTTP request sent, awaiting response... 200 OK\n",
      "Length: 356056638 (340M) [application/octet-stream]\n",
      "Saving to: ‘semi_weakly_supervised_resnext101_32x8-b4712904.pth’\n",
      "\n",
      "-b4712904.pth        19%[==>                 ]  64.54M  40.1KB/s    eta 50m 11s"
     ]
    }
   ],
   "source": [
    "#import timm\n",
    "#model = timm.create_model('swsl_resnext101_32x8d', pretrained=True)\n",
    "#!wget https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x8-b4712904.pth"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "5B977A2FBC05478BA6AEB70C7A5334C5",
    "jupyter": {},
    "notebookId": "5f892607bfe3ac0015e354b2",
    "slideshow": {
     "slide_type": "slide"
    },
    "tags": []
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
