{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Task: Next Sentence Emotion Prediction"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mwandb\u001b[0m: Appending key for api.wandb.ai to your netrc file: /home/phd-fan.weiquan2/.netrc\n"
     ]
    }
   ],
   "source": [
    "# SECURITY: never hardcode an API key in a notebook (it is saved and shared with outputs).\n",
    "# Set the WANDB_API_KEY environment variable instead, then run a bare login:\n",
    "!wandb login"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "os.environ['CUDA_VISIBLE_DEVICES']='2,3'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Failed to detect the name of this notebook, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable to enable code saving.\n",
      "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33mqftie\u001b[0m (use `wandb login --relogin` to force relogin)\n",
      "\u001b[34m\u001b[1mwandb\u001b[0m: wandb version 0.12.9 is available!  To upgrade, please run:\n",
      "\u001b[34m\u001b[1mwandb\u001b[0m:  $ pip install wandb --upgrade\n",
      "2022-01-11 15:18:33.845058: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory\n",
      "2022-01-11 15:18:33.845100: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "\n",
       "                    Syncing run <strong><a href=\"https://wandb.ai/qftie/cped-emo-cls/runs/3b5omku0\" target=\"_blank\">roberta-forwardUt</a></strong> to <a href=\"https://wandb.ai/qftie/cped-emo-cls\" target=\"_blank\">Weights & Biases</a> (<a href=\"https://docs.wandb.com/integrations/jupyter.html\" target=\"_blank\">docs</a>).<br/>\n",
       "\n",
       "                "
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "<button onClick=\"this.nextSibling.style.display='block';this.style.display='none';\">Display W&B run</button><iframe src=\"https://wandb.ai/qftie/cped-emo-cls/runs/3b5omku0?jupyter=true\" style=\"border:none;width:100%;height:420px;display:none;\"></iframe>"
      ],
      "text/plain": [
       "<wandb.sdk.wandb_run.Run at 0x7f2477a4d820>"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inside my model training code\n",
    "import wandb\n",
    "\n",
    "\n",
    "\n",
    "config = wandb.config          # Initialize config\n",
    "config.batch_size = 16          # input batch size for training (default: 64)\n",
    "config.test_batch_size = 26    # input batch size for testing (default: 1000)\n",
    "config.epochs = 2             # number of epochs to train (default: 10)\n",
    "config.lr = 2e-5               # learning rate (default: 0.01)\n",
    "config.momentum = 0.1          # SGD momentum (default: 0.5) \n",
    "config.no_cuda = False         # disables CUDA training\n",
    "config.bert_path = 'hfl/chinese-roberta-wwm-ext'\n",
    "config.exam_name = 'roberta-forwardUt'\n",
    "config.max_seq_len = 512\n",
    "\n",
    "wandb.init(project=\"cped-emo-cls\",entity='qftie',group='bert-cls3-nsep',name=config.exam_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2022-01-11 15:18:40.236257: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory\n",
      "2022-01-11 15:18:40.236285: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd \n",
    "import numpy as np \n",
    "import json, time\n",
    "from tqdm import tqdm \n",
    "from sklearn.metrics import accuracy_score, classification_report, f1_score\n",
    "import torch\n",
    "from torch.utils.data import Dataset, DataLoader, TensorDataset \n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n",
    "from transformers import BertModel, BertConfig, AutoTokenizer, AdamW, get_cosine_schedule_with_warmup\n",
    "import warnings\n",
    "warnings.filterwarnings('ignore')\n",
    "\n",
    "bert_path = config.bert_path\n",
    "tokenizer = AutoTokenizer.from_pretrained(bert_path)   # 初始化分词器"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# ## 准备多gpu\n",
    "# from accelerate import Accelerator\n",
    "# accelerator = Accelerator(split_batches=True)\n",
    "# DEVICE = accelerator.device\n",
    "# DEVICE"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 预处理数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # mmmtd版本\n",
    "# data_text = pd.read_csv('/148Dataset/data-tie.qianfeng/CPED/data_with_personality.csv')\n",
    "# data_text['tv'] = [int(s[:2]) for s in data_text['vvid']]\n",
    "# emo2id = {'中性情绪':0, '正向情绪':1, '负向情绪':2}\n",
    "# data_text['Emotion_3'] = [emo2id[x] for x in data_text['情绪(粗粒度)']]\n",
    "\n",
    "\n",
    "# # train_index = data_text['tv']<=26\n",
    "# # valid_index = (data_text['tv']==32) | (data_text['tv']>=39)\n",
    "# # test_index = ((data_text['tv']>=27) & (data_text['tv']<32)) | ((data_text['tv']>32) & (data_text['tv']<=37))\n",
    "\n",
    "# data_text_train = data_text[data_text['tv']<=26]\n",
    "# data_text_valid = data_text[(data_text['tv']==32) | (data_text['tv']>=39)]\n",
    "# data_text_test = data_text[((data_text['tv']>=27) & (data_text['tv']<32)) | ((data_text['tv']>32) & (data_text['tv']<=37))]\n",
    "\n",
    "# # data_text_train = data_text_train.sample(frac=1, random_state=2021)\n",
    "# # data_text_valid = data_text_valid.sample(frac=1, random_state=2021)\n",
    "# # data_text_test = data_text_test.sample(frac=1, random_state=2021)\n",
    "\n",
    "# x_train = data_text_train['说话内容'].tolist()\n",
    "# train_label = data_text_train['Emotion_3'].tolist()\n",
    "\n",
    "# x_valid = data_text_valid['说话内容'].tolist()\n",
    "# valid_label = data_text_valid['Emotion_3'].tolist()\n",
    "\n",
    "# x_test = data_text_test['说话内容'].tolist()\n",
    "# test_label = data_text_test['Emotion_3'].tolist()\n",
    "\n",
    "# train_encoding = tokenizer(x_train, truncation=True, padding=True, max_length=120)\n",
    "# # max_length对文本做截断\n",
    "# valid_encoding = tokenizer(x_valid, truncation=True, padding=True, max_length=120)\n",
    "# test_encoding = tokenizer(x_test, truncation=True, padding=True, max_length=120)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# CPED版本数据集，使用中性，正向，负向三分类\n",
    "# data_text = pd.read_csv('/148Dataset/data-tie.qianfeng/CPED/data_with_personality.csv')\n",
    "# data_text['tv'] = [int(s[:2]) for s in data_text['vvid']]\n",
    "emo2id = {'neutral':0, 'positive':1, 'negative':2}\n",
    "# data_text['Emotion_3'] = [emo2id[x] for x in data_text['Sentiment']]\n",
    "\n",
    "# train_index = data_text['tv']<=26\n",
    "# valid_index = (data_text['tv']==32) | (data_text['tv']>=39)\n",
    "# test_index = ((data_text['tv']>=27) & (data_text['tv']<32)) | ((data_text['tv']>32) & (data_text['tv']<=37))\n",
    "\n",
    "data_text_train = pd.read_csv('/148Dataset/Dataset/CPED/train_split.csv')\n",
    "data_text_valid = pd.read_csv('/148Dataset/Dataset/CPED/valid_split.csv')\n",
    "data_text_test = pd.read_csv('/148Dataset/Dataset/CPED/test_split.csv')\n",
    "\n",
    "# data_text_train = data_text_train.sample(frac=1, random_state=2021)\n",
    "# data_text_valid = data_text_valid.sample(frac=1, random_state=2021)\n",
    "# data_text_test = data_text_test.sample(frac=1, random_state=2021)\n",
    "\n",
    "x_train = data_text_train['Utterance'].tolist()\n",
    "train_label = [emo2id[x] for x in data_text_train['Sentiment']]\n",
    "\n",
    "x_valid = data_text_valid['Utterance'].tolist()\n",
    "valid_label = [emo2id[x] for x in data_text_valid['Sentiment']]\n",
    "\n",
    "x_test = data_text_test['Utterance'].tolist()\n",
    "test_label = [emo2id[x] for x in data_text_test['Sentiment']]\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # CPED版本数据集，使用13分类映射后的7分类\n",
    "# # data_text = pd.read_csv('/148Dataset/data-tie.qianfeng/CPED/data_with_personality.csv')\n",
    "# # data_text['tv'] = [int(s[:2]) for s in data_text['vvid']]\n",
    "# emo2id = {'happy':0,'grateful':0, 'relaxed':0, 'positive-other':0, 'neutral':1, 'anger':2, 'sadness':3, 'depress':3, 'fear':4, 'worried':4, 'astonished':5, 'disgust':6, 'negative-other':6}\n",
    "# # data_text['Emotion_3'] = [emo2id[x] for x in data_text['Sentiment']]\n",
    "\n",
    "# # train_index = data_text['tv']<=26\n",
    "# # valid_index = (data_text['tv']==32) | (data_text['tv']>=39)\n",
    "# # test_index = ((data_text['tv']>=27) & (data_text['tv']<32)) | ((data_text['tv']>32) & (data_text['tv']<=37))\n",
    "\n",
    "# data_text_train = pd.read_csv('/148Dataset/Dataset/CPED/train_split.csv')\n",
    "# data_text_valid = pd.read_csv('/148Dataset/Dataset/CPED/valid_split.csv')\n",
    "# data_text_test = pd.read_csv('/148Dataset/Dataset/CPED/test_split.csv')\n",
    "\n",
    "# # data_text_train = data_text_train.sample(frac=1, random_state=2021)\n",
    "# # data_text_valid = data_text_valid.sample(frac=1, random_state=2021)\n",
    "# # data_text_test = data_text_test.sample(frac=1, random_state=2021)\n",
    "\n",
    "# x_train = data_text_train['Utterance'].tolist()\n",
    "# train_label = [emo2id[x] for x in data_text_train['Emotion']]\n",
    "\n",
    "# x_valid = data_text_valid['Utterance'].tolist()\n",
    "# valid_label = [emo2id[x] for x in data_text_valid['Emotion']]\n",
    "\n",
    "# x_test = data_text_test['Utterance'].tolist()\n",
    "# test_label = [emo2id[x] for x in data_text_test['Emotion']]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 准备forward utterance作为sentence1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# back_stride = int(data_text_valid['Utterance_ID'][50][-3:])\n",
    "# ' '.join(data_text_valid['Utterance'][ 50- back_stride: 50].tolist())\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "max_seq_len = config.max_seq_len\n",
    "\n",
    "utt_forward_train = []\n",
    "for i in range(len(data_text_train)):\n",
    "    back_stride = int(data_text_train['Utterance_ID'][i][-3:])\n",
    "    # NOTE(review): if Utterance_IDs are 1-based, [i-back_stride : i] includes one extra\n",
    "    # utterance from the PREVIOUS dialogue; confirm whether [i-back_stride+1 : i] is intended.\n",
    "    utt_forward = '.'.join(data_text_train['Utterance'][ i- back_stride: i].tolist())\n",
    "    max_forward_len = max_seq_len - len(data_text_train['Utterance'][i])\n",
    "    # NOTE(review): if the utterance itself has >= max_seq_len chars, max_forward_len <= 0\n",
    "    # and utt_forward[-max_forward_len:] keeps the WHOLE context instead of truncating it.\n",
    "    utt_forward = utt_forward[-max_forward_len:]\n",
    "    utt_forward_train.append(utt_forward)\n",
    "\n",
    "utt_forward_valid = []\n",
    "for i in range(len(data_text_valid)):\n",
    "    back_stride = int(data_text_valid['Utterance_ID'][i][-3:])\n",
    "    utt_forward = '.'.join(data_text_valid['Utterance'][ i- back_stride: i].tolist())\n",
    "    max_forward_len = max_seq_len - len(data_text_valid['Utterance'][i])\n",
    "    utt_forward = utt_forward[-max_forward_len:]\n",
    "    utt_forward_valid.append(utt_forward)\n",
    "\n",
    "utt_forward_test = []\n",
    "for i in range(len(data_text_test)):\n",
    "    back_stride = int(data_text_test['Utterance_ID'][i][-3:])\n",
    "    utt_forward = '.'.join(data_text_test['Utterance'][ i- back_stride: i].tolist())\n",
    "    max_forward_len = max_seq_len - len(data_text_test['Utterance'][i])\n",
    "    utt_forward = utt_forward[-max_forward_len:]    \n",
    "    utt_forward_test.append(utt_forward)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['你是不是真的不知道现在高几了',\n",
       " '你不知道我告诉你',\n",
       " '你现在高三了',\n",
       " '不再是高一高二了',\n",
       " '你说',\n",
       " '你说你 天天天天',\n",
       " '学习不灵，打架门清，一个暑假我给你报个补习班',\n",
       " '原指望你好好学习能上去',\n",
       " '结果断崖式的下滑',\n",
       " '你对得起我吗']"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x_train[30:40]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['妈.你不要叫我妈.我不是你妈.妈，我错了.你没错.我错了.我为什么要生你.我吃饱了撑的我就不该生你.方一凡我真的不明白你脑子里面天天在想什么',\n",
       " '妈.你不要叫我妈.我不是你妈.妈，我错了.你没错.我错了.我为什么要生你.我吃饱了撑的我就不该生你.方一凡我真的不明白你脑子里面天天在想什么.你是不是真的不知道现在高几了',\n",
       " '妈.你不要叫我妈.我不是你妈.妈，我错了.你没错.我错了.我为什么要生你.我吃饱了撑的我就不该生你.方一凡我真的不明白你脑子里面天天在想什么.你是不是真的不知道现在高几了.你不知道我告诉你',\n",
       " '妈.你不要叫我妈.我不是你妈.妈，我错了.你没错.我错了.我为什么要生你.我吃饱了撑的我就不该生你.方一凡我真的不明白你脑子里面天天在想什么.你是不是真的不知道现在高几了.你不知道我告诉你.你现在高三了',\n",
       " '妈.你不要叫我妈.我不是你妈.妈，我错了.你没错.我错了.我为什么要生你.我吃饱了撑的我就不该生你.方一凡我真的不明白你脑子里面天天在想什么.你是不是真的不知道现在高几了.你不知道我告诉你.你现在高三了.不再是高一高二了',\n",
       " '妈.你不要叫我妈.我不是你妈.妈，我错了.你没错.我错了.我为什么要生你.我吃饱了撑的我就不该生你.方一凡我真的不明白你脑子里面天天在想什么.你是不是真的不知道现在高几了.你不知道我告诉你.你现在高三了.不再是高一高二了.你说',\n",
       " '妈.你不要叫我妈.我不是你妈.妈，我错了.你没错.我错了.我为什么要生你.我吃饱了撑的我就不该生你.方一凡我真的不明白你脑子里面天天在想什么.你是不是真的不知道现在高几了.你不知道我告诉你.你现在高三了.不再是高一高二了.你说.你说你 天天天天',\n",
       " '妈.你不要叫我妈.我不是你妈.妈，我错了.你没错.我错了.我为什么要生你.我吃饱了撑的我就不该生你.方一凡我真的不明白你脑子里面天天在想什么.你是不是真的不知道现在高几了.你不知道我告诉你.你现在高三了.不再是高一高二了.你说.你说你 天天天天.学习不灵，打架门清，一个暑假我给你报个补习班',\n",
       " '妈.你不要叫我妈.我不是你妈.妈，我错了.你没错.我错了.我为什么要生你.我吃饱了撑的我就不该生你.方一凡我真的不明白你脑子里面天天在想什么.你是不是真的不知道现在高几了.你不知道我告诉你.你现在高三了.不再是高一高二了.你说.你说你 天天天天.学习不灵，打架门清，一个暑假我给你报个补习班.原指望你好好学习能上去',\n",
       " '妈.你不要叫我妈.我不是你妈.妈，我错了.你没错.我错了.我为什么要生你.我吃饱了撑的我就不该生你.方一凡我真的不明白你脑子里面天天在想什么.你是不是真的不知道现在高几了.你不知道我告诉你.你现在高三了.不再是高一高二了.你说.你说你 天天天天.学习不灵，打架门清，一个暑假我给你报个补习班.原指望你好好学习能上去.结果断崖式的下滑']"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "utt_forward_train[30:40]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "# print(x_valid_forward_utt)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## tokenize"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'input_ids': [[101, 872, 3221, 6443, 102, 872, 3221, 6443, 1557, 102], [101, 2769, 6432, 102, 2769, 6432, 872, 102, 0, 0], [101, 2523, 2487, 102, 2523, 2487, 102, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [0, 0, 0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 1, 1, 1, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0]]}"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tokenizer(['你是谁','我说','很强'],['你是谁啊','我说你','很强'],padding=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_encoding = tokenizer(utt_forward_train, truncation=True, padding=True, max_length=512)\n",
    "# max_length对文本做截断\n",
    "valid_encoding = tokenizer(utt_forward_valid, truncation=True, padding=True, max_length=512)\n",
    "test_encoding = tokenizer(utt_forward_test, truncation=True, padding=True, max_length=512)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 准备训练集，验证集，测试集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 数据集读取 转成dict形式\n",
    "class NewsDataset(Dataset):\n",
    "    def __init__(self, encodings, labels):\n",
    "        self.encodings = encodings\n",
    "        self.labels = labels\n",
    "    \n",
    "    # 读取单个样本\n",
    "    def __getitem__(self, idx):\n",
    "        item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}\n",
    "        item['labels'] = torch.tensor(int(self.labels[idx]))\n",
    "        return item\n",
    "    \n",
    "    def __len__(self):\n",
    "        return len(self.labels)\n",
    "\n",
    "train_dataset = NewsDataset(train_encoding, train_label)\n",
    "valid_dataset = NewsDataset(valid_encoding, valid_label)\n",
    "test_dataset = NewsDataset(test_encoding, test_label)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## WeightedRandomSampler"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "# labels = [train_dataset[i]['labels'] for i in range(len(train_dataset))]\n",
    "# # labels[20:30]\n",
    "# # train_label[20:30]\n",
    "# weights = [1 if label == 2 else 2 for label in labels]\n",
    "# # len(train_dataset)\n",
    "# # weights[20:30]\n",
    "# from torch.utils.data.sampler import  WeightedRandomSampler\n",
    "# sampler = WeightedRandomSampler(weights,\\\n",
    "#                                 num_samples=70000,\\\n",
    "#                                 replacement=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 加载到torch的dataloader"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 单个读取到批量读取\n",
    "train_loader = DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True)\n",
    "\n",
    "# train_loader = DataLoader(train_dataset, batch_size=config.batch_size, sampler=sampler)\n",
    "# NOTE(review): shuffle=True is unnecessary for eval/test loaders; shuffle=False would\n",
    "# make evaluation order deterministic (metrics are unaffected either way).\n",
    "valid_loader = DataLoader(valid_dataset, batch_size=config.batch_size, shuffle=True)\n",
    "test_loader = DataLoader(test_dataset, batch_size=config.batch_size, shuffle=True)\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 定义bert模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# 定义model\n",
    "class Bert_Model(nn.Module):\n",
    "    def __init__(self, bert_path, classes=3):\n",
    "        super(Bert_Model, self).__init__()\n",
    "        self.config = BertConfig.from_pretrained(bert_path)  # 导入模型超参数\n",
    "        self.bert = BertModel.from_pretrained(bert_path)     # 加载预训练模型权重\n",
    "        self.fc = nn.Linear(self.config.hidden_size, classes)  # 直接分类\n",
    "        self.dense = nn.Linear(self.config.hidden_size, self.config.hidden_size)\n",
    "        self.activation = nn.Tanh()\n",
    "\n",
    "        \n",
    "        \n",
    "    def forward(self, input_ids, attention_mask=None, token_type_ids=None):\n",
    "        outputs = self.bert(input_ids, attention_mask, token_type_ids)\n",
    "        out_pool = outputs[1]   # 池化后的输出 [bs, config.hidden_size]\n",
    "        # out_pool = self.dense(out_pool)\n",
    "        # out_pool = self.activation(out_pool)\n",
    "        logit = self.fc(out_pool)   #  [bs, classes]\n",
    "        return logit"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 实例化bert模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of the model checkpoint at hfl/chinese-roberta-wwm-ext were not used when initializing BertModel: ['cls.predictions.transform.LayerNorm.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.decoder.weight', 'cls.predictions.transform.dense.bias', 'cls.seq_relationship.weight', 'cls.predictions.transform.dense.weight', 'cls.seq_relationship.bias', 'cls.predictions.bias']\n",
      "- This IS expected if you are initializing BertModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing BertModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Total parameters: 102860547, Trainable parameters: 102860547\n"
     ]
    }
   ],
   "source": [
    "def get_parameter_number(model):\n",
    "    #  打印模型参数量\n",
    "    total_num = sum(p.numel() for p in model.parameters())\n",
    "    trainable_num = sum(p.numel() for p in model.parameters() if p.requires_grad)\n",
    "    return 'Total parameters: {}, Trainable parameters: {}'.format(total_num, trainable_num)\n",
    "\n",
    "DEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "EPOCHS = config.epochs\n",
    "model = Bert_Model(bert_path)\n",
    "model = nn.DataParallel(model)\n",
    "model = model.cuda()\n",
    "print(get_parameter_number(model))\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 优化器定义"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "optimizer = AdamW(model.parameters(), lr=config.lr, weight_decay=1e-4) #AdamW优化器\n",
    "scheduler = get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=0.3*len(train_loader),\n",
    "                                            num_training_steps=EPOCHS*len(train_loader))\n",
    "# LR warms up linearly for 0.3 epoch (num_warmup_steps=0.3*len(train_loader)), then decays cosine-style.\n",
    "# 这里给个小提示，一定要加warmup（学习率从0慢慢升上去），如果把warmup去掉，可能收敛不了。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # 准备放入多卡环境\n",
    "# model, optimizer, train_loader = accelerator.prepare(model, optimizer, train_loader)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 定义训练函数和验证测试函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 评估模型性能，在验证集上\n",
    "def evaluate(model, data_loader, device):\n",
    "    model.eval()\n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    val_true, val_pred = [], []\n",
    "    valid_loss_sum = 0.0\n",
    "    with torch.no_grad():\n",
    "        for idx, batch in enumerate(data_loader):\n",
    "            y_pred = model(batch['input_ids'].to(device), batch['attention_mask'].to(device), batch['token_type_ids'].to(device))\n",
    "            loss = criterion(y_pred, batch['labels'].to(device))\n",
    "            y_pred = torch.argmax(y_pred, dim=1).detach().cpu().numpy().tolist()\n",
    "            val_pred.extend(y_pred)\n",
    "            val_true.extend(batch['labels'].cpu().numpy().tolist())\n",
    "            valid_loss_sum += loss.item()\n",
    "            \n",
    "    print(classification_report(val_true, val_pred, digits=4))\n",
    "    return accuracy_score(val_true, val_pred), valid_loss_sum/len(data_loader), f1_score(val_true, val_pred, average='macro')  #返回accuracy, loss, f1-macro\n",
    "\n",
    "\n",
    "# 测试集没有标签，需要预测提交\n",
    "def predict(model, data_loader, device):\n",
    "    model.eval()\n",
    "    val_pred = []\n",
    "    with torch.no_grad():\n",
    "        for batch in data_loader:\n",
    "            y_pred = model(batch['input_ids'].to(device), batch['attention_mask'].to(device), batch['token_type_ids'].to(device))\n",
    "            y_pred = torch.argmax(y_pred, dim=1).detach().cpu().numpy().tolist()\n",
    "            val_pred.extend(y_pred)\n",
    "    return val_pred\n",
    "\n",
    "\n",
    "def train_and_eval(model, train_loader, valid_loader, \n",
    "                   optimizer, scheduler, device, epoch):\n",
    "    best_acc = 0.0\n",
    "    patience = 0\n",
    "    best_loss = 100\n",
    "    best_macro_f1 = 0\n",
    "    b = 0.6\n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    for i in range(epoch):\n",
    "        \"\"\"训练模型\"\"\"\n",
    "        start = time.time()\n",
    "        model.train()\n",
    "        print(\"***** Running training epoch {} *****\".format(i+1))\n",
    "        train_loss_sum = 0.0\n",
    "        for idx, batch in enumerate(train_loader):\n",
    "            ids = batch['input_ids'].to(device)\n",
    "            att = batch['attention_mask'].to(device)\n",
    "            tpe = batch['token_type_ids'].to(device)\n",
    "            y = batch['labels'].to(device)  \n",
    "            y_pred = model(ids, att, tpe)\n",
    "            loss = criterion(y_pred, y)\n",
    "            loss = (loss - b).abs() + b # 'flooding' regularization: keeps training loss hovering around b=0.6\n",
    "            step_lr = np.array([param_group[\"lr\"] for param_group in optimizer.param_groups]).mean()\n",
    "            optimizer.zero_grad()\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "            scheduler.step()   # 学习率变化\n",
    "            \n",
    "            train_loss_sum += loss.item()\n",
    "            if (idx + 1) % (len(train_loader)//20) == 0:    # log ~20 times per epoch (fails if len(train_loader) < 20)\n",
    "                wandb.log({\n",
    "                            'Epoch': i+1, \n",
    "                            'train_loss': loss.item(),\n",
    "                            'lr': step_lr\n",
    "                            })\n",
    "                print(\"Epoch {:04d} | Step {:04d}/{:04d} | Loss {:.4f} | Time {:.4f}\".format(\n",
    "                          i+1, idx+1, len(train_loader), train_loss_sum/(idx+1), time.time() - start))\n",
    "                # print(\"Learning rate = {}\".format(optimizer.state_dict()['param_groups'][0]['lr']))\n",
    "\n",
    "        \"\"\"验证模型\"\"\"\n",
    "        model.eval()\n",
    "        acc, valid_loss, valid_macro_f1 = evaluate(model, valid_loader, device)  # 验证模型的性能\n",
    "        wandb.log({'valid_acc': acc, 'valid_loss': valid_loss, 'valid_macro_f1': valid_macro_f1})\n",
    "        # 保存最优模型\n",
    "        if valid_loss < best_loss:\n",
    "            best_loss = valid_loss\n",
    "            torch.save(model.state_dict(), \"best_bert_model.bin\") \n",
    "            print('a Better model has been saved!')\n",
    "\n",
    "        # BUG FIX: this used to print accuracy under the label macro_f1, and neither\n",
    "        # best_acc nor best_macro_f1 was ever updated (they always showed 0.0000).\n",
    "        if valid_macro_f1 > best_macro_f1:\n",
    "            best_macro_f1 = valid_macro_f1\n",
    "        if acc > best_acc:\n",
    "            best_acc = acc\n",
    "        print(\"current macro_f1 is {:.4f}, best macro_f1 is {:.4f}\".format(valid_macro_f1, best_macro_f1))\n",
    "        print(\"time costed = {}s \\n\".format(round(time.time() - start, 5)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 训练和验证模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "***** Running training epoch 1 *****\n",
      "Epoch 0001 | Step 0294/5887 | Loss 1.0287 | Time 135.8588\n",
      "Epoch 0001 | Step 0588/5887 | Loss 1.0003 | Time 270.0566\n",
      "Epoch 0001 | Step 0882/5887 | Loss 0.9786 | Time 405.9551\n",
      "Epoch 0001 | Step 1176/5887 | Loss 0.9670 | Time 537.7769\n",
      "Epoch 0001 | Step 1470/5887 | Loss 0.9561 | Time 669.6577\n",
      "Epoch 0001 | Step 1764/5887 | Loss 0.9472 | Time 803.7852\n",
      "Epoch 0001 | Step 2058/5887 | Loss 0.9389 | Time 937.7430\n",
      "Epoch 0001 | Step 2352/5887 | Loss 0.9311 | Time 1071.7541\n",
      "Epoch 0001 | Step 2646/5887 | Loss 0.9233 | Time 1205.6315\n",
      "Epoch 0001 | Step 2940/5887 | Loss 0.9165 | Time 1339.3904\n",
      "Epoch 0001 | Step 3234/5887 | Loss 0.9086 | Time 1471.6045\n",
      "Epoch 0001 | Step 3528/5887 | Loss 0.9016 | Time 1603.7539\n",
      "Epoch 0001 | Step 3822/5887 | Loss 0.8947 | Time 1737.7191\n",
      "Epoch 0001 | Step 4116/5887 | Loss 0.8890 | Time 1871.7235\n",
      "Epoch 0001 | Step 4410/5887 | Loss 0.8835 | Time 2005.7000\n",
      "Epoch 0001 | Step 4704/5887 | Loss 0.8793 | Time 2139.5571\n",
      "Epoch 0001 | Step 4998/5887 | Loss 0.8734 | Time 2273.4501\n",
      "Epoch 0001 | Step 5292/5887 | Loss 0.8686 | Time 2407.4008\n",
      "Epoch 0001 | Step 5586/5887 | Loss 0.8637 | Time 2541.5339\n",
      "Epoch 0001 | Step 5880/5887 | Loss 0.8597 | Time 2675.8429\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0     0.3835    0.3893    0.3864      3126\n",
      "           1     0.3161    0.2163    0.2568      1692\n",
      "           2     0.6660    0.7174    0.6907      6319\n",
      "\n",
      "    accuracy                         0.5492     11137\n",
      "   macro avg     0.4552    0.4410    0.4447     11137\n",
      "weighted avg     0.5336    0.5492    0.5394     11137\n",
      "\n",
      "a Better model has been saved!\n",
      "current macro_f1 is 0.5492, best macro_f1 is 0.0000\n",
      "time costed = 2778.47261s \n",
      "\n",
      "***** Running training epoch 2 *****\n",
      "Epoch 0002 | Step 0294/5887 | Loss 0.7187 | Time 132.0484\n",
      "Epoch 0002 | Step 0588/5887 | Loss 0.7178 | Time 265.8586\n",
      "Epoch 0002 | Step 0882/5887 | Loss 0.7219 | Time 397.8540\n",
      "Epoch 0002 | Step 1176/5887 | Loss 0.7215 | Time 531.6558\n",
      "Epoch 0002 | Step 1470/5887 | Loss 0.7211 | Time 663.7179\n",
      "Epoch 0002 | Step 1764/5887 | Loss 0.7214 | Time 795.7494\n",
      "Epoch 0002 | Step 2058/5887 | Loss 0.7217 | Time 929.4606\n",
      "Epoch 0002 | Step 2352/5887 | Loss 0.7212 | Time 1063.2942\n",
      "Epoch 0002 | Step 2646/5887 | Loss 0.7199 | Time 1195.2192\n",
      "Epoch 0002 | Step 2940/5887 | Loss 0.7194 | Time 1328.9165\n",
      "Epoch 0002 | Step 3234/5887 | Loss 0.7192 | Time 1462.5017\n",
      "Epoch 0002 | Step 3528/5887 | Loss 0.7190 | Time 1596.1285\n",
      "Epoch 0002 | Step 3822/5887 | Loss 0.7190 | Time 1729.6949\n",
      "Epoch 0002 | Step 4116/5887 | Loss 0.7193 | Time 1861.5823\n",
      "Epoch 0002 | Step 4410/5887 | Loss 0.7182 | Time 1995.2016\n",
      "Epoch 0002 | Step 4704/5887 | Loss 0.7180 | Time 2127.1751\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "wandb: Network error (ReadTimeout), entering retry loop.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0002 | Step 4998/5887 | Loss 0.7173 | Time 2262.7537\n",
      "Epoch 0002 | Step 5292/5887 | Loss 0.7167 | Time 2396.4801\n",
      "Epoch 0002 | Step 5586/5887 | Loss 0.7165 | Time 2528.4684\n",
      "Epoch 0002 | Step 5880/5887 | Loss 0.7162 | Time 2662.2145\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0     0.3959    0.4053    0.4006      3126\n",
      "           1     0.2996    0.2015    0.2410      1692\n",
      "           2     0.6723    0.7234    0.6969      6319\n",
      "\n",
      "    accuracy                         0.5548     11137\n",
      "   macro avg     0.4560    0.4434    0.4462     11137\n",
      "weighted avg     0.5381    0.5548    0.5445     11137\n",
      "\n",
      "a Better model has been saved!\n",
      "current macro_f1 is 0.5548, best macro_f1 is 0.0000\n",
      "time costed = 2761.74043s \n",
      "\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<br/>Waiting for W&B process to finish, PID 22963... <strong style=\"color:green\">(success).</strong>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "23080884b7944366b8a2d68cf9b09b72",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "VBox(children=(Label(value=' 0.00MB of 0.00MB uploaded (0.00MB deduped)\\r'), FloatProgress(value=1.0, max=1.0)…"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "<style>\n",
       "    table.wandb td:nth-child(1) { padding: 0 10px; text-align: right }\n",
       "    .wandb-row { display: flex; flex-direction: row; flex-wrap: wrap; width: 100% }\n",
       "    .wandb-col { display: flex; flex-direction: column; flex-basis: 100%; flex: 1; padding: 10px; }\n",
       "    </style>\n",
       "<div class=\"wandb-row\"><div class=\"wandb-col\">\n",
       "<h3>Run history:</h3><br/><table class=\"wandb\"><tr><td>Epoch</td><td>▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁████████████████████</td></tr><tr><td>lr</td><td>▂▃▄▆▇██████▇▇▇▇▇▆▆▆▅▅▅▅▄▄▄▃▃▃▂▂▂▂▂▁▁▁▁▁▁</td></tr><tr><td>train_loss</td><td>▅▄██▄▃▇▂▄▆▂▁▂▂▄▃▆▂▄▁▁▂▂▁▁▂▅▁▂▂▃▄▁▁▆▅▂▁▁▄</td></tr><tr><td>valid_acc</td><td>▁█</td></tr><tr><td>valid_loss</td><td>█▁</td></tr></table><br/></div><div class=\"wandb-col\">\n",
       "<h3>Run summary:</h3><br/><table class=\"wandb\"><tr><td>Epoch</td><td>2</td></tr><tr><td>lr</td><td>0.0</td></tr><tr><td>train_loss</td><td>0.89312</td></tr><tr><td>valid_acc</td><td>0.55482</td></tr><tr><td>valid_loss</td><td>0.93631</td></tr></table>\n",
       "</div></div>\n",
       "Synced 6 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)\n",
       "<br/>Synced <strong style=\"color:#cdcd00\">roberta-forwardUt</strong>: <a href=\"https://wandb.ai/qftie/cped-emo-cls/runs/3b5omku0\" target=\"_blank\">https://wandb.ai/qftie/cped-emo-cls/runs/3b5omku0</a><br/>\n",
       "Find logs at: <code>./wandb/run-20220111_151831-3b5omku0/logs</code><br/>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# 训练和验证评估\n",
    "train_and_eval(model, train_loader, valid_loader, optimizer, scheduler, DEVICE, EPOCHS)\n",
    "wandb.finish()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 加载最优模型测试"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0     0.3736    0.4289    0.3993      7991\n",
      "           1     0.3835    0.2665    0.3145      6470\n",
      "           2     0.5680    0.6027    0.5848     12977\n",
      "\n",
      "    accuracy                         0.4728     27438\n",
      "   macro avg     0.4417    0.4327    0.4329     27438\n",
      "weighted avg     0.4679    0.4728    0.4670     27438\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# 加载最优权重对测试集测试\n",
    "model.load_state_dict(torch.load(\"best_bert_model.bin\"))\n",
    "pred_test = evaluate(model, test_loader, DEVICE)\n",
    "# print(\"\\n Test Accuracy = {} \\n\".format(accuracy_score(test_label, pred_test)))\n",
    "# print(classification_report(test_label, pred_test, digits=4))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [],
   "source": [
    "# output_dir = 'output/ch-roberta-dorwardUt'\n",
    "# os.makedirs(output_dir, exist_ok=True)\n",
    "# torch.save(model.state_dict(), output_dir+\"/pytorch_model.bin\")\n",
    "# # torch.save(model, \"pytorch_model_whole.bin\")"
   ]
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "49e5bd26eb5fb0a8ffb649f62262c127e261ba51230dc2578599ab5938abf7ca"
  },
  "kernelspec": {
   "display_name": "Python 3.8.0 64-bit ('torch17py38': conda)",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.0"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
