{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mwandb\u001b[0m: Appending key for api.wandb.ai to your netrc file: /home/phd-fan.weiquan2/.netrc\n"
     ]
    }
   ],
   "source": [
    "# SECURITY: never hardcode a wandb API key in a notebook -- the key previously\n",
    "# committed here should be revoked. Read it from the environment instead:\n",
    "# export WANDB_API_KEY=... before launching Jupyter, or run `wandb login` once interactively.\n",
    "!wandb login \"$WANDB_API_KEY\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "# Restrict this process to the listed physical GPUs; PyTorch will see them\n",
    "# remapped as cuda:0..cuda:3.\n",
    "os.environ['CUDA_VISIBLE_DEVICES']='0,1,5,6'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Failed to detect the name of this notebook, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable to enable code saving.\n",
      "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33mqftie\u001b[0m (use `wandb login --relogin` to force relogin)\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "\n",
       "                    Syncing run <strong><a href=\"https://wandb.ai/qftie/cped-emo-cls/runs/30ns8frx\" target=\"_blank\">floral-plasma-57</a></strong> to <a href=\"https://wandb.ai/qftie/cped-emo-cls\" target=\"_blank\">Weights & Biases</a> (<a href=\"https://docs.wandb.com/integrations/jupyter.html\" target=\"_blank\">docs</a>).<br/>\n",
       "\n",
       "                "
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Start a wandb run for experiment tracking.\n",
    "import wandb\n",
    "wandb.init(project=\"cped-emo-cls\",entity='qftie',group='bert-cls3')\n",
    "\n",
    "\n",
    "config = wandb.config          # hyperparameters below are logged to wandb\n",
    "config.batch_size = 32          # input batch size for training\n",
    "config.test_batch_size = 26    # input batch size for testing\n",
    "config.epochs = 3             # number of epochs to train\n",
    "config.lr = 2e-5               # learning rate for AdamW\n",
    "config.momentum = 0.1          # NOTE(review): unused -- training uses AdamW, not SGD\n",
    "config.no_cuda = False         # disables CUDA training\n",
    "config.bert_path = 'hfl/chinese-roberta-wwm-ext'  # HF checkpoint for tokenizer + encoder\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd \n",
    "import numpy as np \n",
    "import json, time\n",
    "from tqdm import tqdm \n",
    "from sklearn.metrics import accuracy_score, classification_report, f1_score\n",
    "import torch\n",
    "from torch.utils.data import Dataset, DataLoader, TensorDataset \n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n",
    "from transformers import BertModel, BertConfig, AutoTokenizer, AdamW, get_cosine_schedule_with_warmup\n",
    "import warnings\n",
    "warnings.filterwarnings('ignore')\n",
    "\n",
    "bert_path = config.bert_path\n",
    "tokenizer = AutoTokenizer.from_pretrained(bert_path)   # initialize the tokenizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# ## 准备多gpu\n",
    "# from accelerate import Accelerator\n",
    "# accelerator = Accelerator(split_batches=True)\n",
    "# DEVICE = accelerator.device\n",
    "# DEVICE"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 预处理数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # mmmtd版本\n",
    "# data_text = pd.read_csv('/148Dataset/data-tie.qianfeng/CPED/data_with_personality.csv')\n",
    "# data_text['tv'] = [int(s[:2]) for s in data_text['vvid']]\n",
    "# emo2id = {'中性情绪':0, '正向情绪':1, '负向情绪':2}\n",
    "# data_text['Emotion_3'] = [emo2id[x] for x in data_text['情绪(粗粒度)']]\n",
    "\n",
    "\n",
    "# # train_index = data_text['tv']<=26\n",
    "# # valid_index = (data_text['tv']==32) | (data_text['tv']>=39)\n",
    "# # test_index = ((data_text['tv']>=27) & (data_text['tv']<32)) | ((data_text['tv']>32) & (data_text['tv']<=37))\n",
    "\n",
    "# data_text_train = data_text[data_text['tv']<=26]\n",
    "# data_text_valid = data_text[(data_text['tv']==32) | (data_text['tv']>=39)]\n",
    "# data_text_test = data_text[((data_text['tv']>=27) & (data_text['tv']<32)) | ((data_text['tv']>32) & (data_text['tv']<=37))]\n",
    "\n",
    "# # data_text_train = data_text_train.sample(frac=1, random_state=2021)\n",
    "# # data_text_valid = data_text_valid.sample(frac=1, random_state=2021)\n",
    "# # data_text_test = data_text_test.sample(frac=1, random_state=2021)\n",
    "\n",
    "# x_train = data_text_train['说话内容'].tolist()\n",
    "# train_label = data_text_train['Emotion_3'].tolist()\n",
    "\n",
    "# x_valid = data_text_valid['说话内容'].tolist()\n",
    "# valid_label = data_text_valid['Emotion_3'].tolist()\n",
    "\n",
    "# x_test = data_text_test['说话内容'].tolist()\n",
    "# test_label = data_text_test['Emotion_3'].tolist()\n",
    "\n",
    "# train_encoding = tokenizer(x_train, truncation=True, padding=True, max_length=120)\n",
    "# # max_length对文本做截断\n",
    "# valid_encoding = tokenizer(x_valid, truncation=True, padding=True, max_length=120)\n",
    "# test_encoding = tokenizer(x_test, truncation=True, padding=True, max_length=120)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# CPED version of the dataset preparation.\n",
    "# (Commented lines below are the older single-CSV loading path.)\n",
    "# data_text = pd.read_csv('/148Dataset/data-tie.qianfeng/CPED/data_with_personality.csv')\n",
    "# data_text['tv'] = [int(s[:2]) for s in data_text['vvid']]\n",
    "emo2id = {'neutral':0, 'positive':1, 'negative':2}\n",
    "# data_text['Emotion_3'] = [emo2id[x] for x in data_text['Sentiment']]\n",
    "\n",
    "# train_index = data_text['tv']<=26\n",
    "# valid_index = (data_text['tv']==32) | (data_text['tv']>=39)\n",
    "# test_index = ((data_text['tv']>=27) & (data_text['tv']<32)) | ((data_text['tv']>32) & (data_text['tv']<=37))\n",
    "\n",
    "# NOTE(review): hardcoded absolute paths -- consider a configurable DATA_DIR.\n",
    "data_text_train = pd.read_csv('/148Dataset/Dataset/CPED/train_split.csv')\n",
    "data_text_valid = pd.read_csv('/148Dataset/Dataset/CPED/valid_split.csv')\n",
    "data_text_test = pd.read_csv('/148Dataset/Dataset/CPED/test_split.csv')\n",
    "\n",
    "# data_text_train = data_text_train.sample(frac=1, random_state=2021)\n",
    "# data_text_valid = data_text_valid.sample(frac=1, random_state=2021)\n",
    "# data_text_test = data_text_test.sample(frac=1, random_state=2021)\n",
    "\n",
    "# Utterance text plus integer class ids per split.\n",
    "x_train = data_text_train['Utterance'].tolist()\n",
    "train_label = [emo2id[x] for x in data_text_train['Sentiment']]\n",
    "\n",
    "x_valid = data_text_valid['Utterance'].tolist()\n",
    "valid_label = [emo2id[x] for x in data_text_valid['Sentiment']]\n",
    "\n",
    "x_test = data_text_test['Utterance'].tolist()\n",
    "test_label = [emo2id[x] for x in data_text_test['Sentiment']]\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 准备forward utterance作为sentence1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# back_stride = int(data_text_valid['Utterance_ID'][50][-3:])\n",
    "# ' '.join(data_text_valid['Utterance'][ 50- back_stride: 50].tolist())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "def build_forward_context(df):\n",
    "    \"\"\"Join each utterance's preceding dialogue turns into one context string.\n",
    "\n",
    "    Relies on the split's default RangeIndex and on `Utterance_ID` ending with\n",
    "    a 3-digit position within the dialogue.\n",
    "    NOTE(review): if that numbering starts at 001, the slice may reach one\n",
    "    utterance into the previous dialogue -- confirm against the raw data.\n",
    "    \"\"\"\n",
    "    contexts = []\n",
    "    for i in range(len(df)):\n",
    "        back_stride = int(df['Utterance_ID'][i][-3:])\n",
    "        contexts.append('.'.join(df['Utterance'][i - back_stride: i].tolist()))\n",
    "    return contexts\n",
    "\n",
    "# Previously this loop was copy-pasted three times; one helper avoids drift.\n",
    "utt_forward_train = build_forward_context(data_text_train)\n",
    "utt_forward_valid = build_forward_context(data_text_valid)\n",
    "utt_forward_test = build_forward_context(data_text_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['你是不是真的不知道现在高几了',\n",
       " '你不知道我告诉你',\n",
       " '你现在高三了',\n",
       " '不再是高一高二了',\n",
       " '你说',\n",
       " '你说你 天天天天',\n",
       " '学习不灵，打架门清，一个暑假我给你报个补习班',\n",
       " '原指望你好好学习能上去',\n",
       " '结果断崖式的下滑',\n",
       " '你对得起我吗']"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x_train[30:40]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['妈.你不要叫我妈.我不是你妈.妈，我错了.你没错.我错了.我为什么要生你.我吃饱了撑的我就不该生你.方一凡我真的不明白你脑子里面天天在想什么',\n",
       " '妈.你不要叫我妈.我不是你妈.妈，我错了.你没错.我错了.我为什么要生你.我吃饱了撑的我就不该生你.方一凡我真的不明白你脑子里面天天在想什么.你是不是真的不知道现在高几了',\n",
       " '妈.你不要叫我妈.我不是你妈.妈，我错了.你没错.我错了.我为什么要生你.我吃饱了撑的我就不该生你.方一凡我真的不明白你脑子里面天天在想什么.你是不是真的不知道现在高几了.你不知道我告诉你',\n",
       " '妈.你不要叫我妈.我不是你妈.妈，我错了.你没错.我错了.我为什么要生你.我吃饱了撑的我就不该生你.方一凡我真的不明白你脑子里面天天在想什么.你是不是真的不知道现在高几了.你不知道我告诉你.你现在高三了',\n",
       " '妈.你不要叫我妈.我不是你妈.妈，我错了.你没错.我错了.我为什么要生你.我吃饱了撑的我就不该生你.方一凡我真的不明白你脑子里面天天在想什么.你是不是真的不知道现在高几了.你不知道我告诉你.你现在高三了.不再是高一高二了',\n",
       " '妈.你不要叫我妈.我不是你妈.妈，我错了.你没错.我错了.我为什么要生你.我吃饱了撑的我就不该生你.方一凡我真的不明白你脑子里面天天在想什么.你是不是真的不知道现在高几了.你不知道我告诉你.你现在高三了.不再是高一高二了.你说',\n",
       " '妈.你不要叫我妈.我不是你妈.妈，我错了.你没错.我错了.我为什么要生你.我吃饱了撑的我就不该生你.方一凡我真的不明白你脑子里面天天在想什么.你是不是真的不知道现在高几了.你不知道我告诉你.你现在高三了.不再是高一高二了.你说.你说你 天天天天',\n",
       " '妈.你不要叫我妈.我不是你妈.妈，我错了.你没错.我错了.我为什么要生你.我吃饱了撑的我就不该生你.方一凡我真的不明白你脑子里面天天在想什么.你是不是真的不知道现在高几了.你不知道我告诉你.你现在高三了.不再是高一高二了.你说.你说你 天天天天.学习不灵，打架门清，一个暑假我给你报个补习班',\n",
       " '妈.你不要叫我妈.我不是你妈.妈，我错了.你没错.我错了.我为什么要生你.我吃饱了撑的我就不该生你.方一凡我真的不明白你脑子里面天天在想什么.你是不是真的不知道现在高几了.你不知道我告诉你.你现在高三了.不再是高一高二了.你说.你说你 天天天天.学习不灵，打架门清，一个暑假我给你报个补习班.原指望你好好学习能上去',\n",
       " '妈.你不要叫我妈.我不是你妈.妈，我错了.你没错.我错了.我为什么要生你.我吃饱了撑的我就不该生你.方一凡我真的不明白你脑子里面天天在想什么.你是不是真的不知道现在高几了.你不知道我告诉你.你现在高三了.不再是高一高二了.你说.你说你 天天天天.学习不灵，打架门清，一个暑假我给你报个补习班.原指望你好好学习能上去.结果断崖式的下滑']"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "utt_forward_train[30:40]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# print(x_valid_forward_utt)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## tokenize"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'input_ids': [[101, 872, 3221, 6443, 102, 872, 3221, 6443, 1557, 102], [101, 2769, 6432, 102, 2769, 6432, 872, 102, 0, 0], [101, 2523, 2487, 102, 2523, 2487, 102, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [0, 0, 0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 1, 1, 1, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0]]}"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tokenizer(['你是谁','我说','很强'],['你是谁啊','我说你','很强'],padding=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Tokenize as sentence pairs: (current utterance, forward context), truncated to 512 tokens.\n",
    "train_encoding = tokenizer(x_train, utt_forward_train, truncation=True, padding=True, max_length=512)\n",
    "# max_length truncates over-long pairs\n",
    "valid_encoding = tokenizer(x_valid, utt_forward_valid, truncation=True, padding=True, max_length=512)\n",
    "test_encoding = tokenizer(x_test, utt_forward_test, truncation=True, padding=True, max_length=512)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 准备训练集，验证集，测试集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Wrap the tokenized encodings + labels as a torch Dataset yielding dict samples.\n",
    "class NewsDataset(Dataset):\n",
    "    def __init__(self, encodings, labels):\n",
    "        self.encodings = encodings\n",
    "        self.labels = labels\n",
    "    \n",
    "    # Return one sample as a dict of tensors (input_ids, attention_mask, token_type_ids, labels).\n",
    "    def __getitem__(self, idx):\n",
    "        item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}\n",
    "        item['labels'] = torch.tensor(int(self.labels[idx]))\n",
    "        return item\n",
    "    \n",
    "    def __len__(self):\n",
    "        return len(self.labels)\n",
    "\n",
    "train_dataset = NewsDataset(train_encoding, train_label)\n",
    "valid_dataset = NewsDataset(valid_encoding, valid_label)\n",
    "test_dataset = NewsDataset(test_encoding, test_label)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Batch the datasets. Only the training loader is shuffled; evaluation loaders\n",
    "# keep the original order so predictions from predict() line up with the\n",
    "# valid_label / test_label lists (shuffle=True here broke that alignment).\n",
    "train_loader = DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True)\n",
    "valid_loader = DataLoader(valid_dataset, batch_size=config.batch_size, shuffle=False)\n",
    "test_loader = DataLoader(test_dataset, batch_size=config.batch_size, shuffle=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 加载到torch的dataloader"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 定义bert模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# BERT encoder with a linear classification head on the pooled output.\n",
    "class Bert_Model(nn.Module):\n",
    "    def __init__(self, bert_path, classes=3):\n",
    "        super(Bert_Model, self).__init__()\n",
    "        self.config = BertConfig.from_pretrained(bert_path)  # load model hyperparameters\n",
    "        self.bert = BertModel.from_pretrained(bert_path)     # load pretrained weights\n",
    "        self.fc = nn.Linear(self.config.hidden_size, classes)  # classify directly from pooled output\n",
    "        self.dense = nn.Linear(self.config.hidden_size, self.config.hidden_size)\n",
    "        self.activation = nn.Tanh()\n",
    "\n",
    "        \n",
    "        \n",
    "    def forward(self, input_ids, attention_mask=None, token_type_ids=None):\n",
    "        outputs = self.bert(input_ids, attention_mask, token_type_ids)\n",
    "        out_pool = outputs[1]   # pooled output, [bs, hidden_size]\n",
    "        # out_pool = self.dense(out_pool)\n",
    "        # out_pool = self.activation(out_pool)\n",
    "        logit = self.fc(out_pool)   #  [bs, classes]\n",
    "        return logit"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 实例化bert模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of the model checkpoint at hfl/chinese-roberta-wwm-ext were not used when initializing BertModel: ['cls.predictions.bias', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.transform.LayerNorm.weight', 'cls.seq_relationship.bias', 'cls.predictions.transform.dense.bias', 'cls.predictions.decoder.weight', 'cls.seq_relationship.weight']\n",
      "- This IS expected if you are initializing BertModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing BertModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Total parameters: 102860547, Trainable parameters: 102860547\n"
     ]
    }
   ],
   "source": [
    "def get_parameter_number(model):\n",
    "    # Report total and trainable parameter counts.\n",
    "    total_num = sum(p.numel() for p in model.parameters())\n",
    "    trainable_num = sum(p.numel() for p in model.parameters() if p.requires_grad)\n",
    "    return 'Total parameters: {}, Trainable parameters: {}'.format(total_num, trainable_num)\n",
    "\n",
    "DEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "EPOCHS = config.epochs\n",
    "model = Bert_Model(bert_path)\n",
    "model = nn.DataParallel(model)  # replicate across the visible GPUs\n",
    "model = model.cuda()\n",
    "print(get_parameter_number(model))\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 优化器定义"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "optimizer = AdamW(model.parameters(), lr=config.lr, weight_decay=1e-4) # AdamW optimizer\n",
    "scheduler = get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=len(train_loader),\n",
    "                                            num_training_steps=EPOCHS*len(train_loader))\n",
    "# Linear warmup over the first epoch, then cosine decay.\n",
    "# Tip: keep the warmup (LR ramps up from 0); without it training may fail to converge."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # 准备放入多卡环境\n",
    "# model, optimizer, train_loader = accelerator.prepare(model, optimizer, train_loader)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 定义训练函数和验证测试函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Evaluate on a labelled loader; prints a classification report and returns\n",
    "# (accuracy, mean loss).\n",
    "def evaluate(model, data_loader, device):\n",
    "    model.eval()\n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    val_true, val_pred = [], []\n",
    "    valid_loss_sum = 0.0\n",
    "    with torch.no_grad():\n",
    "        for idx, batch in enumerate(data_loader):\n",
    "            y_pred = model(batch['input_ids'].to(device), batch['attention_mask'].to(device), batch['token_type_ids'].to(device))\n",
    "            loss = criterion(y_pred, batch['labels'].to(device))\n",
    "            y_pred = torch.argmax(y_pred, dim=1).detach().cpu().numpy().tolist()\n",
    "            val_pred.extend(y_pred)\n",
    "            val_true.extend(batch['labels'].cpu().numpy().tolist())\n",
    "            valid_loss_sum += loss.item()\n",
    "            \n",
    "    print(classification_report(val_true, val_pred, digits=4))\n",
    "    return accuracy_score(val_true, val_pred), valid_loss_sum/len(data_loader)\n",
    "\n",
    "\n",
    "# Predict class ids for an unlabelled loader; order follows the loader.\n",
    "def predict(model, data_loader, device):\n",
    "    model.eval()\n",
    "    val_pred = []\n",
    "    with torch.no_grad():\n",
    "        for batch in data_loader:\n",
    "            y_pred = model(batch['input_ids'].to(device), batch['attention_mask'].to(device), batch['token_type_ids'].to(device))\n",
    "            y_pred = torch.argmax(y_pred, dim=1).detach().cpu().numpy().tolist()\n",
    "            val_pred.extend(y_pred)\n",
    "    return val_pred\n",
    "\n",
    "\n",
    "def train_and_eval(model, train_loader, valid_loader, \n",
    "                   optimizer, scheduler, device, epoch):\n",
    "    best_acc = 0.0\n",
    "    patience = 0\n",
    "    best_loss = 100\n",
    "    b = 0.6  # flooding level for the loss-flooding regularizer below\n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    for i in range(epoch):\n",
    "        # --- training ---\n",
    "        start = time.time()\n",
    "        model.train()\n",
    "        print(\"***** Running training epoch {} *****\".format(i+1))\n",
    "        train_loss_sum = 0.0\n",
    "        # Log ~20 times per epoch; max(1, ...) guards against loaders with < 20 batches,\n",
    "        # where the old `len(train_loader)//20` would be 0 and raise ZeroDivisionError.\n",
    "        log_every = max(1, len(train_loader)//20)\n",
    "        for idx, batch in enumerate(train_loader):\n",
    "            ids = batch['input_ids'].to(device)\n",
    "            att = batch['attention_mask'].to(device)\n",
    "            tpe = batch['token_type_ids'].to(device)\n",
    "            y = batch['labels'].to(device)  \n",
    "            y_pred = model(ids, att, tpe)\n",
    "            loss = criterion(y_pred, y)\n",
    "            loss = (loss - b).abs() + b  # \"flooding\" trick: keep training loss hovering around b\n",
    "            step_lr = np.array([param_group[\"lr\"] for param_group in optimizer.param_groups]).mean()\n",
    "            optimizer.zero_grad()\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "            scheduler.step()   # advance the LR schedule every optimizer step\n",
    "            \n",
    "            train_loss_sum += loss.item()\n",
    "            if (idx + 1) % log_every == 0:\n",
    "                wandb.log({\n",
    "                            'Epoch': i+1, \n",
    "                            'train_loss': loss,\n",
    "                            'lr': step_lr\n",
    "                            })\n",
    "                print(\"Epoch {:04d} | Step {:04d}/{:04d} | Loss {:.4f} | Time {:.4f}\".format(\n",
    "                          i+1, idx+1, len(train_loader), train_loss_sum/(idx+1), time.time() - start))\n",
    "\n",
    "        # --- validation ---\n",
    "        model.eval()\n",
    "        acc, valid_loss = evaluate(model, valid_loader, device)\n",
    "        wandb.log({'valid_acc': acc, 'valid_loss': valid_loss})\n",
    "        # Keep the checkpoint with the lowest validation loss.\n",
    "        if valid_loss < best_loss:\n",
    "            best_loss = valid_loss\n",
    "            torch.save(model.state_dict(), \"best_bert_model.pth\") \n",
    "        # Bug fix: best_acc was never updated, so every epoch printed\n",
    "        # \"best acc is 0.0000\" (visible in the saved outputs).\n",
    "        best_acc = max(best_acc, acc)\n",
    "        \n",
    "        print(\"current acc is {:.4f}, best acc is {:.4f}\".format(acc, best_acc))\n",
    "        print(\"time costed = {}s \\n\".format(round(time.time() - start, 5)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 训练和验证模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "***** Running training epoch 1 *****\n",
      "Epoch 0001 | Step 0147/2944 | Loss 1.0676 | Time 115.6512\n",
      "Epoch 0001 | Step 0294/2944 | Loss 1.0363 | Time 218.4924\n",
      "Epoch 0001 | Step 0441/2944 | Loss 1.0168 | Time 323.2481\n",
      "Epoch 0001 | Step 0588/2944 | Loss 0.9973 | Time 426.7891\n",
      "Epoch 0001 | Step 0735/2944 | Loss 0.9796 | Time 529.5440\n",
      "Epoch 0001 | Step 0882/2944 | Loss 0.9653 | Time 633.4753\n",
      "Epoch 0001 | Step 1029/2944 | Loss 0.9544 | Time 736.3715\n",
      "Epoch 0001 | Step 1176/2944 | Loss 0.9444 | Time 837.1032\n",
      "Epoch 0001 | Step 1323/2944 | Loss 0.9348 | Time 939.0347\n",
      "Epoch 0001 | Step 1470/2944 | Loss 0.9267 | Time 1042.1054\n",
      "Epoch 0001 | Step 1617/2944 | Loss 0.9195 | Time 1145.7515\n",
      "Epoch 0001 | Step 1764/2944 | Loss 0.9122 | Time 1247.3621\n",
      "Epoch 0001 | Step 1911/2944 | Loss 0.9064 | Time 1349.7598\n",
      "Epoch 0001 | Step 2058/2944 | Loss 0.9008 | Time 1454.8192\n",
      "Epoch 0001 | Step 2205/2944 | Loss 0.8954 | Time 1558.1896\n",
      "Epoch 0001 | Step 2352/2944 | Loss 0.8896 | Time 1659.4119\n",
      "Epoch 0001 | Step 2499/2944 | Loss 0.8840 | Time 1761.3876\n",
      "Epoch 0001 | Step 2646/2944 | Loss 0.8799 | Time 1865.4003\n",
      "Epoch 0001 | Step 2793/2944 | Loss 0.8760 | Time 1965.8802\n",
      "Epoch 0001 | Step 2940/2944 | Loss 0.8715 | Time 2070.6971\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0     0.4755    0.3071    0.3732      3126\n",
      "           1     0.3738    0.2583    0.3055      1692\n",
      "           2     0.6725    0.8460    0.7494      6319\n",
      "\n",
      "    accuracy                         0.6055     11137\n",
      "   macro avg     0.5073    0.4705    0.4760     11137\n",
      "weighted avg     0.5718    0.6055    0.5763     11137\n",
      "\n",
      "current acc is 0.6055, best acc is 0.0000\n",
      "time costed = 2166.20214s \n",
      "\n",
      "***** Running training epoch 2 *****\n",
      "Epoch 0002 | Step 0147/2944 | Loss 0.7325 | Time 103.1496\n",
      "Epoch 0002 | Step 0294/2944 | Loss 0.7352 | Time 206.4355\n",
      "Epoch 0002 | Step 0441/2944 | Loss 0.7354 | Time 307.9583\n",
      "Epoch 0002 | Step 0588/2944 | Loss 0.7289 | Time 413.4988\n",
      "Epoch 0002 | Step 0735/2944 | Loss 0.7262 | Time 514.4656\n",
      "Epoch 0002 | Step 0882/2944 | Loss 0.7252 | Time 617.1229\n",
      "Epoch 0002 | Step 1029/2944 | Loss 0.7239 | Time 719.8431\n",
      "Epoch 0002 | Step 1176/2944 | Loss 0.7225 | Time 818.5918\n",
      "Epoch 0002 | Step 1323/2944 | Loss 0.7213 | Time 918.6444\n",
      "Epoch 0002 | Step 1470/2944 | Loss 0.7206 | Time 1022.8966\n",
      "Epoch 0002 | Step 1617/2944 | Loss 0.7194 | Time 1124.9497\n",
      "Epoch 0002 | Step 1764/2944 | Loss 0.7178 | Time 1228.6726\n",
      "Epoch 0002 | Step 1911/2944 | Loss 0.7176 | Time 1333.4385\n",
      "Epoch 0002 | Step 2058/2944 | Loss 0.7168 | Time 1434.5431\n",
      "Epoch 0002 | Step 2205/2944 | Loss 0.7158 | Time 1537.6920\n",
      "Epoch 0002 | Step 2352/2944 | Loss 0.7148 | Time 1638.9866\n",
      "Epoch 0002 | Step 2499/2944 | Loss 0.7137 | Time 1741.9882\n",
      "Epoch 0002 | Step 2646/2944 | Loss 0.7127 | Time 1843.2408\n",
      "Epoch 0002 | Step 2793/2944 | Loss 0.7114 | Time 1946.1850\n",
      "Epoch 0002 | Step 2940/2944 | Loss 0.7109 | Time 2049.2927\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0     0.4213    0.4792    0.4484      3126\n",
      "           1     0.3303    0.4048    0.3638      1692\n",
      "           2     0.7449    0.6492    0.6937      6319\n",
      "\n",
      "    accuracy                         0.5643     11137\n",
      "   macro avg     0.4988    0.5111    0.5020     11137\n",
      "weighted avg     0.5911    0.5643    0.5747     11137\n",
      "\n",
      "current acc is 0.5643, best acc is 0.0000\n",
      "time costed = 2144.37514s \n",
      "\n",
      "***** Running training epoch 3 *****\n",
      "Epoch 0003 | Step 0147/2944 | Loss 0.6731 | Time 101.9617\n",
      "Epoch 0003 | Step 0294/2944 | Loss 0.6727 | Time 206.9876\n",
      "Epoch 0003 | Step 0441/2944 | Loss 0.6702 | Time 310.6137\n",
      "Epoch 0003 | Step 0588/2944 | Loss 0.6681 | Time 412.5824\n",
      "Epoch 0003 | Step 0735/2944 | Loss 0.6677 | Time 515.6878\n",
      "Epoch 0003 | Step 0882/2944 | Loss 0.6677 | Time 618.6162\n",
      "Epoch 0003 | Step 1029/2944 | Loss 0.6674 | Time 718.3221\n",
      "Epoch 0003 | Step 1176/2944 | Loss 0.6667 | Time 821.1218\n",
      "Epoch 0003 | Step 1323/2944 | Loss 0.6661 | Time 922.9347\n",
      "Epoch 0003 | Step 1470/2944 | Loss 0.6660 | Time 1024.5813\n",
      "Epoch 0003 | Step 1617/2944 | Loss 0.6659 | Time 1126.8402\n",
      "Epoch 0003 | Step 1764/2944 | Loss 0.6658 | Time 1227.3394\n",
      "Epoch 0003 | Step 1911/2944 | Loss 0.6651 | Time 1332.3137\n",
      "Epoch 0003 | Step 2058/2944 | Loss 0.6648 | Time 1435.7084\n",
      "Epoch 0003 | Step 2205/2944 | Loss 0.6642 | Time 1538.6497\n",
      "Epoch 0003 | Step 2352/2944 | Loss 0.6640 | Time 1643.0094\n",
      "Epoch 0003 | Step 2499/2944 | Loss 0.6640 | Time 1742.4746\n",
      "Epoch 0003 | Step 2646/2944 | Loss 0.6642 | Time 1845.4570\n",
      "Epoch 0003 | Step 2793/2944 | Loss 0.6642 | Time 1949.0829\n",
      "Epoch 0003 | Step 2940/2944 | Loss 0.6641 | Time 2051.7494\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0     0.4303    0.4894    0.4579      3126\n",
      "           1     0.3657    0.2961    0.3272      1692\n",
      "           2     0.7216    0.7093    0.7154      6319\n",
      "\n",
      "    accuracy                         0.5848     11137\n",
      "   macro avg     0.5059    0.4983    0.5002     11137\n",
      "weighted avg     0.5858    0.5848    0.5842     11137\n",
      "\n",
      "current acc is 0.5848, best acc is 0.0000\n",
      "time costed = 2147.86795s \n",
      "\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<br/>Waiting for W&B process to finish, PID 390... <strong style=\"color:green\">(success).</strong>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "2fc2b993ed5a477a9e6c61cb06ee4837",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "VBox(children=(Label(value=' 0.00MB of 0.00MB uploaded (0.00MB deduped)\\r'), FloatProgress(value=1.0, max=1.0)…"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "<style>\n",
       "    table.wandb td:nth-child(1) { padding: 0 10px; text-align: right }\n",
       "    .wandb-row { display: flex; flex-direction: row; flex-wrap: wrap; width: 100% }\n",
       "    .wandb-col { display: flex; flex-direction: column; flex-basis: 100%; flex: 1; padding: 10px; }\n",
       "    </style>\n",
       "<div class=\"wandb-row\"><div class=\"wandb-col\">\n",
       "<h3>Run history:</h3><br/><table class=\"wandb\"><tr><td>Epoch</td><td>▁▁▁▁▁▁▁▁▁▁▁▁▁▁▅▅▅▅▅▅▅▅▅▅▅▅▅█████████████</td></tr><tr><td>lr</td><td>▁▂▂▃▃▄▄▅▆▆▇▇██████▇▇▇▇▆▆▅▅▅▄▄▃▃▃▂▂▂▂▁▁▁▁</td></tr><tr><td>train_loss</td><td>▅█▅▆▃▄▂▃▅▄▄▃▂▅▃▂▄▃▁▁▂▁▁▂▁▁▁▂▁▁▂▁▂▁▁▂▁▂▂▂</td></tr><tr><td>valid_acc</td><td>█▁▄</td></tr><tr><td>valid_loss</td><td>▁█▂</td></tr></table><br/></div><div class=\"wandb-col\">\n",
       "<h3>Run summary:</h3><br/><table class=\"wandb\"><tr><td>Epoch</td><td>3</td></tr><tr><td>lr</td><td>0.0</td></tr><tr><td>train_loss</td><td>0.65915</td></tr><tr><td>valid_acc</td><td>0.58481</td></tr><tr><td>valid_loss</td><td>0.89097</td></tr></table>\n",
       "</div></div>\n",
       "Synced 6 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)\n",
       "<br/>Synced <strong style=\"color:#cdcd00\">floral-plasma-57</strong>: <a href=\"https://wandb.ai/qftie/cped-emo-cls/runs/30ns8frx\" target=\"_blank\">https://wandb.ai/qftie/cped-emo-cls/runs/30ns8frx</a><br/>\n",
       "Find logs at: <code>./wandb/run-20211119_160325-30ns8frx/logs</code><br/>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Train with per-epoch validation; the best checkpoint is written to best_bert_model.pth.\n",
    "train_and_eval(model, train_loader, valid_loader, optimizer, scheduler, DEVICE, EPOCHS)\n",
    "wandb.finish()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 加载最优模型测试"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0     0.4295    0.3068    0.3580      7991\n",
      "           1     0.4519    0.3402    0.3881      6470\n",
      "           2     0.5879    0.7637    0.6644     12977\n",
      "\n",
      "    accuracy                         0.5308     27438\n",
      "   macro avg     0.4898    0.4703    0.4702     27438\n",
      "weighted avg     0.5097    0.5308    0.5100     27438\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Reload the best checkpoint and evaluate on the test set.\n",
    "model.load_state_dict(torch.load(\"best_bert_model.pth\"))\n",
    "# Bug fix: evaluate() returns (accuracy, mean_loss), not predictions, so the\n",
    "# old `pred_test = evaluate(...)` bound a tuple to a misleading name (and the\n",
    "# commented metric lines operating on it would have failed).\n",
    "test_acc, test_loss = evaluate(model, test_loader, DEVICE)\n",
    "print(\"\\n Test Accuracy = {} \\n\".format(test_acc))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the final model weights.\n",
    "# NOTE(review): 'dorwardUt' looks like a typo for 'forwardUt' -- confirm before\n",
    "# renaming, since existing artifacts may already live under this path.\n",
    "output_dir = 'output/ch-roberta-dorwardUt'\n",
    "os.makedirs(output_dir, exist_ok=True)\n",
    "torch.save(model.state_dict(), output_dir+\"/pytorch_model.bin\")\n",
    "# torch.save(model, \"pytorch_model_whole.bin\")"
   ]
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "49e5bd26eb5fb0a8ffb649f62262c127e261ba51230dc2578599ab5938abf7ca"
  },
  "kernelspec": {
   "display_name": "Python 3.8.0 64-bit ('torch17py38': conda)",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.0"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
