{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mwandb\u001b[0m: Appending key for api.wandb.ai to your netrc file: /home/phd-fan.weiquan2/.netrc\n"
     ]
    }
   ],
   "source": [
    "!wandb login fef2375a2e5bbeff568e67838b427f129b7fa678"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "os.environ['CUDA_VISIBLE_DEVICES']='7'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Failed to detect the name of this notebook, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable to enable code saving.\n",
      "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33mqftie\u001b[0m (use `wandb login --relogin` to force relogin)\n",
      "\u001b[34m\u001b[1mwandb\u001b[0m: wandb version 0.12.7 is available!  To upgrade, please run:\n",
      "\u001b[34m\u001b[1mwandb\u001b[0m:  $ pip install wandb --upgrade\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "\n",
       "                    Syncing run <strong><a href=\"https://wandb.ai/qftie/cped-emo-cls/runs/1nxi08ij\" target=\"_blank\">roberta-large</a></strong> to <a href=\"https://wandb.ai/qftie/cped-emo-cls\" target=\"_blank\">Weights & Biases</a> (<a href=\"https://docs.wandb.com/integrations/jupyter.html\" target=\"_blank\">docs</a>).<br/>\n",
       "\n",
       "                "
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "<button onClick=\"this.nextSibling.style.display='block';this.style.display='none';\">Display W&B run</button><iframe src=\"https://wandb.ai/qftie/cped-emo-cls/runs/1nxi08ij?jupyter=true\" style=\"border:none;width:100%;height:420px;display:none;\"></iframe>"
      ],
      "text/plain": [
       "<wandb.sdk.wandb_run.Run at 0x7f3f3719b190>"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Inside my model training code\n",
     "import wandb\n",
     "\n",
     "# NOTE(review): wandb.config is populated BEFORE wandb.init() — this relies on\n",
     "# wandb's legacy pre-init config behaviour; confirm it still works on newer wandb.\n",
     "config = wandb.config          # Initialize config\n",
     "config.batch_size = 32          # input batch size for training (default: 64)\n",
     "config.test_batch_size = 64    # input batch size for testing (default: 1000)\n",
     "config.epochs = 3             # number of epochs to train (default: 10)\n",
     "config.lr = 2e-5               # learning rate (default: 0.01)\n",
     "config.momentum = 0.1          # SGD momentum (default: 0.5) \n",
     "config.no_cuda = False         # disables CUDA training\n",
     "config.bert_path = 'hfl/chinese-roberta-wwm-ext-large'\n",
     "config.exam_name = 'roberta-large'\n",
     "\n",
     "wandb.init(project=\"cped-emo-cls\",entity='qftie',group='bert-cls3',name=config.exam_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
     "import pandas as pd \n",
     "import numpy as np \n",
     "import json, time\n",
     "from tqdm import tqdm \n",
     "from sklearn.metrics import accuracy_score, classification_report, f1_score\n",
     "import torch\n",
     "from torch.utils.data import Dataset, DataLoader, TensorDataset \n",
     "import torch.nn as nn\n",
     "import torch.nn.functional as F\n",
     "import torch.optim as optim\n",
     "from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n",
     "# NOTE(review): transformers' AdamW is deprecated in recent versions; torch.optim.AdamW is the replacement.\n",
     "from transformers import BertModel, BertConfig, BertTokenizer, AdamW, get_cosine_schedule_with_warmup\n",
     "import warnings\n",
     "warnings.filterwarnings('ignore')\n",
     "\n",
     "\n",
     "\n",
     "bert_path = config.bert_path\n",
     "tokenizer = BertTokenizer.from_pretrained(bert_path)   # initialize the tokenizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# ## 准备多gpu\n",
    "# from accelerate import Accelerator\n",
    "# accelerator = Accelerator(split_batches=True)\n",
    "# DEVICE = accelerator.device\n",
    "# DEVICE"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 预处理数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # mmmtd版本\n",
    "# data_text = pd.read_csv('/148Dataset/data-tie.qianfeng/CPED/data_with_personality.csv')\n",
    "# data_text['tv'] = [int(s[:2]) for s in data_text['vvid']]\n",
    "# emo2id = {'中性情绪':0, '正向情绪':1, '负向情绪':2}\n",
    "# data_text['Emotion_3'] = [emo2id[x] for x in data_text['情绪(粗粒度)']]\n",
    "\n",
    "\n",
    "# # train_index = data_text['tv']<=26\n",
    "# # valid_index = (data_text['tv']==32) | (data_text['tv']>=39)\n",
    "# # test_index = ((data_text['tv']>=27) & (data_text['tv']<32)) | ((data_text['tv']>32) & (data_text['tv']<=37))\n",
    "\n",
    "# data_text_train = data_text[data_text['tv']<=26]\n",
    "# data_text_valid = data_text[(data_text['tv']==32) | (data_text['tv']>=39)]\n",
    "# data_text_test = data_text[((data_text['tv']>=27) & (data_text['tv']<32)) | ((data_text['tv']>32) & (data_text['tv']<=37))]\n",
    "\n",
    "# # data_text_train = data_text_train.sample(frac=1, random_state=2021)\n",
    "# # data_text_valid = data_text_valid.sample(frac=1, random_state=2021)\n",
    "# # data_text_test = data_text_test.sample(frac=1, random_state=2021)\n",
    "\n",
    "# x_train = data_text_train['说话内容'].tolist()\n",
    "# train_label = data_text_train['Emotion_3'].tolist()\n",
    "\n",
    "# x_valid = data_text_valid['说话内容'].tolist()\n",
    "# valid_label = data_text_valid['Emotion_3'].tolist()\n",
    "\n",
    "# x_test = data_text_test['说话内容'].tolist()\n",
    "# test_label = data_text_test['Emotion_3'].tolist()\n",
    "\n",
    "# train_encoding = tokenizer(x_train, truncation=True, padding=True, max_length=120)\n",
    "# # max_length对文本做截断\n",
    "# valid_encoding = tokenizer(x_valid, truncation=True, padding=True, max_length=120)\n",
    "# test_encoding = tokenizer(x_test, truncation=True, padding=True, max_length=120)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# CPED版本\n",
    "# data_text = pd.read_csv('/148Dataset/data-tie.qianfeng/CPED/data_with_personality.csv')\n",
    "# data_text['tv'] = [int(s[:2]) for s in data_text['vvid']]\n",
    "emo2id = {'neutral':0, 'positive':1, 'negative':2}\n",
    "# data_text['Emotion_3'] = [emo2id[x] for x in data_text['Sentiment']]\n",
    "\n",
    "# train_index = data_text['tv']<=26\n",
    "# valid_index = (data_text['tv']==32) | (data_text['tv']>=39)\n",
    "# test_index = ((data_text['tv']>=27) & (data_text['tv']<32)) | ((data_text['tv']>32) & (data_text['tv']<=37))\n",
    "\n",
    "data_text_train = pd.read_csv('/148Dataset/Dataset/CPED/train_split.csv')\n",
    "data_text_valid = pd.read_csv('/148Dataset/Dataset/CPED/valid_split.csv')\n",
    "data_text_test = pd.read_csv('/148Dataset/Dataset/CPED/test_split.csv')\n",
    "\n",
    "# data_text_train = data_text_train.sample(frac=1, random_state=2021)\n",
    "# data_text_valid = data_text_valid.sample(frac=1, random_state=2021)\n",
    "# data_text_test = data_text_test.sample(frac=1, random_state=2021)\n",
    "\n",
    "x_train = data_text_train['Utterance'].tolist()\n",
    "train_label = [emo2id[x] for x in data_text_train['Sentiment']]\n",
    "\n",
    "x_valid = data_text_valid['Utterance'].tolist()\n",
    "valid_label = [emo2id[x] for x in data_text_valid['Sentiment']]\n",
    "\n",
    "x_test = data_text_test['Utterance'].tolist()\n",
    "test_label = [emo2id[x] for x in data_text_test['Sentiment']]\n",
    "\n",
    "train_encoding = tokenizer(x_train, truncation=True, padding=True, max_length=120)\n",
    "# max_length对文本做截断\n",
    "valid_encoding = tokenizer(x_valid, truncation=True, padding=True, max_length=120)\n",
    "test_encoding = tokenizer(x_test, truncation=True, padding=True, max_length=120)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 0, 0, 2, 2, 2, 2, 0, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 2, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 2, 2, 2, 2, 2, 2, 2]\n",
      "[2, 2, 2, 0, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 0, 0, 0, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 2, 2, 1, 1, 2, 2, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 0, 0, 0, 0, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1]\n"
     ]
    }
   ],
   "source": [
    "print(train_label[-500:-300])\n",
    "print(valid_label[-500:-300])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 准备训练集，验证集，测试集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 数据集读取 转成dict形式\n",
    "class NewsDataset(Dataset):\n",
    "    def __init__(self, encodings, labels):\n",
    "        self.encodings = encodings\n",
    "        self.labels = labels\n",
    "    \n",
    "    # 读取单个样本\n",
    "    def __getitem__(self, idx):\n",
    "        item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}\n",
    "        item['labels'] = torch.tensor(int(self.labels[idx]))\n",
    "        return item\n",
    "    \n",
    "    def __len__(self):\n",
    "        return len(self.labels)\n",
    "\n",
    "train_dataset = NewsDataset(train_encoding, train_label)\n",
    "valid_dataset = NewsDataset(valid_encoding, valid_label)\n",
    "test_dataset = NewsDataset(test_encoding, test_label)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 单个读取到批量读取\n",
    "train_loader = DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True)\n",
    "valid_loader = DataLoader(valid_dataset, batch_size=config.batch_size, shuffle=True)\n",
    "test_loader = DataLoader(test_dataset, batch_size=config.batch_size, shuffle=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 加载到torch的dataloader"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 定义bert模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
     "\n",
     "# Sentence-level classifier: pretrained BERT encoder + a single linear head.\n",
     "class Bert_Model(nn.Module):\n",
     "    def __init__(self, bert_path, classes=3):\n",
     "        super(Bert_Model, self).__init__()\n",
     "        self.config = BertConfig.from_pretrained(bert_path)  # load model hyperparameters\n",
     "        self.bert = BertModel.from_pretrained(bert_path)     # load pretrained weights\n",
     "        self.fc = nn.Linear(self.config.hidden_size, classes)  # classify directly from the pooled output\n",
     "        # NOTE(review): dense/activation are only used if the commented lines in forward() are enabled.\n",
     "        self.dense = nn.Linear(self.config.hidden_size, self.config.hidden_size)\n",
     "        self.activation = nn.Tanh()\n",
     "\n",
     "        \n",
     "        \n",
     "    def forward(self, input_ids, attention_mask=None, token_type_ids=None):\n",
     "        outputs = self.bert(input_ids, attention_mask, token_type_ids)\n",
     "        out_pool = outputs[1]   # pooled [CLS] output, shape [bs, hidden_size]\n",
     "        # out_pool = self.dense(out_pool)\n",
     "        # out_pool = self.activation(out_pool)\n",
     "        logit = self.fc(out_pool)   #  [bs, classes]\n",
     "        return logit"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 实例化bert模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of the model checkpoint at hfl/chinese-roberta-wwm-ext-large were not used when initializing BertModel: ['cls.predictions.bias', 'cls.predictions.decoder.weight', 'cls.seq_relationship.weight', 'cls.predictions.transform.LayerNorm.bias', 'cls.seq_relationship.bias', 'cls.predictions.transform.dense.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.transform.LayerNorm.weight']\n",
      "- This IS expected if you are initializing BertModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing BertModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Total parameters: 326575107, Trainable parameters: 326575107\n"
     ]
    }
   ],
   "source": [
     "def get_parameter_number(model):\n",
     "    # Report total vs. trainable parameter counts as a formatted string.\n",
     "    total_num = sum(p.numel() for p in model.parameters())\n",
     "    trainable_num = sum(p.numel() for p in model.parameters() if p.requires_grad)\n",
     "    return 'Total parameters: {}, Trainable parameters: {}'.format(total_num, trainable_num)\n",
     "\n",
     "DEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
     "EPOCHS = config.epochs\n",
     "model = Bert_Model(bert_path)\n",
     "# NOTE(review): CUDA_VISIBLE_DEVICES='7' exposes a single GPU, so DataParallel is a no-op wrapper here.\n",
     "model = nn.DataParallel(model)\n",
     "model = model.cuda()\n",
     "print(get_parameter_number(model))\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 优化器定义"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
     "optimizer = AdamW(model.parameters(), lr=config.lr, weight_decay=1e-4) # AdamW optimizer\n",
     "scheduler = get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=len(train_loader),\n",
     "                                            num_training_steps=EPOCHS*len(train_loader))\n",
     "# LR warms up linearly for one epoch, then decays along a cosine curve.\n",
     "# Tip: keep the warmup (LR ramps up from 0) — without it training may fail to converge."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # 准备放入多卡环境\n",
    "# model, optimizer, train_loader = accelerator.prepare(model, optimizer, train_loader)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 定义训练函数和验证测试函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 评估模型性能，在验证集上\n",
    "def evaluate(model, data_loader, device):\n",
    "    model.eval()\n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    val_true, val_pred = [], []\n",
    "    valid_loss_sum = 0.0\n",
    "    with torch.no_grad():\n",
    "        for idx, batch in enumerate(data_loader):\n",
    "            y_pred = model(batch['input_ids'].to(device), batch['attention_mask'].to(device), batch['token_type_ids'].to(device))\n",
    "            loss = criterion(y_pred, batch['labels'].to(device))\n",
    "            y_pred = torch.argmax(y_pred, dim=1).detach().cpu().numpy().tolist()\n",
    "            val_pred.extend(y_pred)\n",
    "            val_true.extend(batch['labels'].cpu().numpy().tolist())\n",
    "            valid_loss_sum += loss.item()\n",
    "            \n",
    "    print(classification_report(val_true, val_pred, digits=4))\n",
    "    return accuracy_score(val_true, val_pred), valid_loss_sum/len(data_loader)  #返回accuracy\n",
    "\n",
    "\n",
    "# 测试集没有标签，需要预测提交\n",
    "def predict(model, data_loader, device):\n",
    "    model.eval()\n",
    "    val_pred = []\n",
    "    with torch.no_grad():\n",
    "        for batch in data_loader:\n",
    "            y_pred = model(batch['input_ids'].to(device), batch['attention_mask'].to(device), batch['token_type_ids'].to(device))\n",
    "            y_pred = torch.argmax(y_pred, dim=1).detach().cpu().numpy().tolist()\n",
    "            val_pred.extend(y_pred)\n",
    "    return val_pred\n",
    "\n",
    "\n",
    "def train_and_eval(model, train_loader, valid_loader, \n",
    "                   optimizer, scheduler, device, epoch):\n",
    "    best_acc = 0.0\n",
    "    patience = 0\n",
    "    # b = 0.6\n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    for i in range(epoch):\n",
    "        \"\"\"训练模型\"\"\"\n",
    "        start = time.time()\n",
    "        model.train()\n",
    "        print(\"***** Running training epoch {} *****\".format(i+1))\n",
    "        train_loss_sum = 0.0\n",
    "        for idx, batch in enumerate(train_loader):\n",
    "            ids = batch['input_ids'].to(device)\n",
    "            att = batch['attention_mask'].to(device)\n",
    "            tpe = batch['token_type_ids'].to(device)\n",
    "            y = batch['labels'].to(device)  \n",
    "            y_pred = model(ids, att, tpe)\n",
    "            loss = criterion(y_pred, y)\n",
    "            # loss = (loss - b).abs() + b # This is it!\n",
    "            step_lr = np.array([param_group[\"lr\"] for param_group in optimizer.param_groups]).mean()\n",
    "            optimizer.zero_grad()\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "            scheduler.step()   # 学习率变化\n",
    "            \n",
    "            train_loss_sum += loss.item()\n",
    "            if (idx + 1) % (len(train_loader)//20) == 0:    # 只打印五次结果\n",
    "                wandb.log({\n",
    "                            'Epoch': i+1, \n",
    "                            'train_loss': loss,\n",
    "                            'lr': step_lr\n",
    "                            })\n",
    "                print(\"Epoch {:04d} | Step {:04d}/{:04d} | Loss {:.4f} | Time {:.4f}\".format(\n",
    "                          i+1, idx+1, len(train_loader), train_loss_sum/(idx+1), time.time() - start))\n",
    "                # print(\"Learning rate = {}\".format(optimizer.state_dict()['param_groups'][0]['lr']))\n",
    "\n",
    "        \"\"\"验证模型\"\"\"\n",
    "        model.eval()\n",
    "        acc, valid_loss = evaluate(model, valid_loader, device)  # 验证模型的性能\n",
    "        wandb.log({'valid_acc': acc, 'valid_loss': valid_loss})\n",
    "        ## 保存最优模型\n",
    "        if acc > best_acc:\n",
    "            best_acc = acc\n",
    "            torch.save(model.state_dict(), config.exam_name+\"-best_bert_model.bin\") \n",
    "        \n",
    "        print(\"current acc is {:.4f}, best acc is {:.4f}\".format(acc, best_acc))\n",
    "        print(\"time costed = {}s \\n\".format(round(time.time() - start, 5)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 训练和验证模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "***** Running training epoch 1 *****\n",
      "Epoch 0001 | Step 0147/2944 | Loss 1.0490 | Time 60.2181\n",
      "Epoch 0001 | Step 0294/2944 | Loss 1.0285 | Time 120.6202\n",
      "Epoch 0001 | Step 0441/2944 | Loss 1.0113 | Time 182.6833\n",
      "Epoch 0001 | Step 0588/2944 | Loss 0.9991 | Time 245.1598\n",
      "Epoch 0001 | Step 0735/2944 | Loss 0.9884 | Time 307.3844\n",
      "Epoch 0001 | Step 0882/2944 | Loss 0.9806 | Time 369.5630\n",
      "Epoch 0001 | Step 1029/2944 | Loss 0.9741 | Time 431.7737\n",
      "Epoch 0001 | Step 1176/2944 | Loss 0.9697 | Time 493.7508\n",
      "Epoch 0001 | Step 1323/2944 | Loss 0.9663 | Time 555.7990\n",
      "Epoch 0001 | Step 1470/2944 | Loss 0.9640 | Time 617.9557\n",
      "Epoch 0001 | Step 1617/2944 | Loss 0.9611 | Time 680.4050\n",
      "Epoch 0001 | Step 1764/2944 | Loss 0.9597 | Time 742.9178\n",
      "Epoch 0001 | Step 1911/2944 | Loss 0.9589 | Time 804.7213\n",
      "Epoch 0001 | Step 2058/2944 | Loss 0.9573 | Time 867.3147\n",
      "Epoch 0001 | Step 2205/2944 | Loss 0.9568 | Time 929.5987\n",
      "Epoch 0001 | Step 2352/2944 | Loss 0.9548 | Time 991.5398\n",
      "Epoch 0001 | Step 2499/2944 | Loss 0.9543 | Time 1054.0410\n",
      "Epoch 0001 | Step 2646/2944 | Loss 0.9541 | Time 1116.1679\n",
      "Epoch 0001 | Step 2793/2944 | Loss 0.9529 | Time 1178.4676\n",
      "Epoch 0001 | Step 2940/2944 | Loss 0.9522 | Time 1240.4625\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0     0.3905    0.6411    0.4853      3126\n",
      "           1     0.4981    0.0792    0.1367      1692\n",
      "           2     0.7055    0.6404    0.6714      6319\n",
      "\n",
      "    accuracy                         0.5554     11137\n",
      "   macro avg     0.5314    0.4536    0.4311     11137\n",
      "weighted avg     0.5856    0.5554    0.5380     11137\n",
      "\n",
      "current acc is 0.5554, best acc is 0.5554\n",
      "time costed = 1282.65624s \n",
      "\n",
      "***** Running training epoch 2 *****\n",
      "Epoch 0002 | Step 0147/2944 | Loss 0.8925 | Time 61.6962\n",
      "Epoch 0002 | Step 0294/2944 | Loss 0.8918 | Time 123.9504\n",
      "Epoch 0002 | Step 0441/2944 | Loss 0.8871 | Time 186.6052\n",
      "Epoch 0002 | Step 0588/2944 | Loss 0.8899 | Time 248.8857\n",
      "Epoch 0002 | Step 0735/2944 | Loss 0.8974 | Time 311.3871\n",
      "Epoch 0002 | Step 0882/2944 | Loss 0.9002 | Time 373.4052\n",
      "Epoch 0002 | Step 1029/2944 | Loss 0.8990 | Time 435.1673\n",
      "Epoch 0002 | Step 1176/2944 | Loss 0.8991 | Time 497.2622\n",
      "Epoch 0002 | Step 1323/2944 | Loss 0.8984 | Time 559.4947\n",
      "Epoch 0002 | Step 1470/2944 | Loss 0.8985 | Time 623.7532\n",
      "Epoch 0002 | Step 1617/2944 | Loss 0.8995 | Time 694.4160\n",
      "Epoch 0002 | Step 1764/2944 | Loss 0.9005 | Time 766.4106\n",
      "Epoch 0002 | Step 1911/2944 | Loss 0.9014 | Time 833.5482\n",
      "Epoch 0002 | Step 2058/2944 | Loss 0.9003 | Time 900.1092\n",
      "Epoch 0002 | Step 2205/2944 | Loss 0.8994 | Time 965.7528\n",
      "Epoch 0002 | Step 2352/2944 | Loss 0.8982 | Time 1031.4312\n",
      "Epoch 0002 | Step 2499/2944 | Loss 0.8982 | Time 1097.4122\n",
      "Epoch 0002 | Step 2646/2944 | Loss 0.8987 | Time 1162.6144\n",
      "Epoch 0002 | Step 2793/2944 | Loss 0.8986 | Time 1228.3316\n",
      "Epoch 0002 | Step 2940/2944 | Loss 0.8990 | Time 1299.2067\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0     0.4176    0.4802    0.4467      3126\n",
      "           1     0.3932    0.1838    0.2505      1692\n",
      "           2     0.6856    0.7326    0.7083      6319\n",
      "\n",
      "    accuracy                         0.5783     11137\n",
      "   macro avg     0.4988    0.4655    0.4685     11137\n",
      "weighted avg     0.5659    0.5783    0.5653     11137\n",
      "\n",
      "current acc is 0.5783, best acc is 0.5783\n",
      "time costed = 1369.9515s \n",
      "\n",
      "***** Running training epoch 3 *****\n",
      "Epoch 0003 | Step 0147/2944 | Loss 0.7760 | Time 67.5345\n",
      "Epoch 0003 | Step 0294/2944 | Loss 0.7922 | Time 133.5736\n",
      "Epoch 0003 | Step 0441/2944 | Loss 0.7945 | Time 199.4586\n",
      "Epoch 0003 | Step 0588/2944 | Loss 0.7933 | Time 265.0930\n",
      "Epoch 0003 | Step 0735/2944 | Loss 0.7887 | Time 330.4277\n",
      "Epoch 0003 | Step 0882/2944 | Loss 0.7874 | Time 396.4314\n",
      "Epoch 0003 | Step 1029/2944 | Loss 0.7832 | Time 462.5678\n",
      "Epoch 0003 | Step 1176/2944 | Loss 0.7810 | Time 528.2023\n",
      "Epoch 0003 | Step 1323/2944 | Loss 0.7780 | Time 594.1318\n",
      "Epoch 0003 | Step 1470/2944 | Loss 0.7753 | Time 659.7138\n",
      "Epoch 0003 | Step 1617/2944 | Loss 0.7735 | Time 724.7754\n",
      "Epoch 0003 | Step 1764/2944 | Loss 0.7721 | Time 790.5525\n",
      "Epoch 0003 | Step 1911/2944 | Loss 0.7712 | Time 856.6024\n",
      "Epoch 0003 | Step 2058/2944 | Loss 0.7696 | Time 922.3496\n",
      "Epoch 0003 | Step 2205/2944 | Loss 0.7681 | Time 988.2451\n",
      "Epoch 0003 | Step 2352/2944 | Loss 0.7675 | Time 1053.9301\n",
      "Epoch 0003 | Step 2499/2944 | Loss 0.7664 | Time 1119.1262\n",
      "Epoch 0003 | Step 2646/2944 | Loss 0.7658 | Time 1185.3410\n",
      "Epoch 0003 | Step 2793/2944 | Loss 0.7639 | Time 1255.3592\n",
      "Epoch 0003 | Step 2940/2944 | Loss 0.7633 | Time 1330.6882\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0     0.4186    0.4674    0.4417      3126\n",
      "           1     0.3564    0.2311    0.2804      1692\n",
      "           2     0.6936    0.7189    0.7060      6319\n",
      "\n",
      "    accuracy                         0.5742     11137\n",
      "   macro avg     0.4895    0.4725    0.4760     11137\n",
      "weighted avg     0.5652    0.5742    0.5672     11137\n",
      "\n",
      "current acc is 0.5742, best acc is 0.5783\n",
      "time costed = 1372.39482s \n",
      "\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<br/>Waiting for W&B process to finish, PID 20804... <strong style=\"color:green\">(success).</strong>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "3c1c9186ae3d4315b421a3e99810199e",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "VBox(children=(Label(value=' 0.00MB of 0.00MB uploaded (0.00MB deduped)\\r'), FloatProgress(value=1.0, max=1.0)…"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "<style>\n",
       "    table.wandb td:nth-child(1) { padding: 0 10px; text-align: right }\n",
       "    .wandb-row { display: flex; flex-direction: row; flex-wrap: wrap; width: 100% }\n",
       "    .wandb-col { display: flex; flex-direction: column; flex-basis: 100%; flex: 1; padding: 10px; }\n",
       "    </style>\n",
       "<div class=\"wandb-row\"><div class=\"wandb-col\">\n",
       "<h3>Run history:</h3><br/><table class=\"wandb\"><tr><td>Epoch</td><td>▁▁▁▁▁▁▁▁▁▁▁▁▁▁▅▅▅▅▅▅▅▅▅▅▅▅▅█████████████</td></tr><tr><td>lr</td><td>▁▂▂▃▃▄▄▅▆▆▇▇██████▇▇▇▇▆▆▅▅▅▄▄▃▃▃▂▂▂▂▁▁▁▁</td></tr><tr><td>train_loss</td><td>▆▅▅▅▅▃▄▇▇▇▄▆▅▆▆█▄▅▄▆▆▃▄▇▄▆▅▁▅▃▂▂▄▅▃▄▃▃▃▅</td></tr><tr><td>valid_acc</td><td>▁█▇</td></tr><tr><td>valid_loss</td><td>▄▁█</td></tr></table><br/></div><div class=\"wandb-col\">\n",
       "<h3>Run summary:</h3><br/><table class=\"wandb\"><tr><td>Epoch</td><td>3</td></tr><tr><td>lr</td><td>0.0</td></tr><tr><td>train_loss</td><td>0.89875</td></tr><tr><td>valid_acc</td><td>0.57421</td></tr><tr><td>valid_loss</td><td>0.94784</td></tr></table>\n",
       "</div></div>\n",
       "Synced 6 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)\n",
       "<br/>Synced <strong style=\"color:#cdcd00\">roberta-large</strong>: <a href=\"https://wandb.ai/qftie/cped-emo-cls/runs/1nxi08ij\" target=\"_blank\">https://wandb.ai/qftie/cped-emo-cls/runs/1nxi08ij</a><br/>\n",
       "Find logs at: <code>./wandb/run-20211123_092310-1nxi08ij/logs</code><br/>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# 训练和验证评估\n",
    "train_and_eval(model, train_loader, valid_loader, optimizer, scheduler, DEVICE, EPOCHS)\n",
    "wandb.finish()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 加载最优模型测试"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0     0.3815    0.4906    0.4292      7991\n",
      "           1     0.4663    0.2168    0.2960      6470\n",
      "           2     0.5964    0.6505    0.6223     12977\n",
      "\n",
      "    accuracy                         0.5017     27438\n",
      "   macro avg     0.4814    0.4526    0.4492     27438\n",
      "weighted avg     0.5031    0.5017    0.4891     27438\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# 加载最优权重对测试集测试\n",
    "model.load_state_dict(torch.load(config.exam_name+\"-best_bert_model.bin\"))\n",
    "pred_test = evaluate(model, test_loader, DEVICE)\n",
    "# print(\"\\n Test Accuracy = {} \\n\".format(accuracy_score(test_label, pred_test)))\n",
    "# print(classification_report(test_label, pred_test, digits=4))\n"
   ]
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "49e5bd26eb5fb0a8ffb649f62262c127e261ba51230dc2578599ab5938abf7ca"
  },
  "kernelspec": {
   "display_name": "Python 3.8.0 64-bit ('torch17py38': conda)",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.0"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
