{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mwandb\u001b[0m: Appending key for api.wandb.ai to your netrc file: /home/phd-fan.weiquan2/.netrc\n"
     ]
    }
   ],
   "source": [
    "# SECURITY FIX: never commit a wandb API key to a notebook. The key that\n",
    "# was hardcoded here should be revoked. Authenticate via the WANDB_API_KEY\n",
    "# environment variable, or run `wandb login` once interactively.\n",
    "!wandb login"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Restrict this process to the listed physical GPUs. Must run before\n",
    "# torch/TensorFlow initialize CUDA, hence it sits at the very top.\n",
    "import os\n",
    "os.environ['CUDA_VISIBLE_DEVICES']='0,1,5,6'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Failed to detect the name of this notebook, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable to enable code saving.\n",
      "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33mqftie\u001b[0m (use `wandb login --relogin` to force relogin)\n",
      "\u001b[34m\u001b[1mwandb\u001b[0m: wandb version 0.12.11 is available!  To upgrade, please run:\n",
      "\u001b[34m\u001b[1mwandb\u001b[0m:  $ pip install wandb --upgrade\n",
      "2022-03-04 17:39:28.893448: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory\n",
      "2022-03-04 17:39:28.893493: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "\n",
       "                    Syncing run <strong><a href=\"https://wandb.ai/qftie/cped-emo-cls/runs/1m9wr3tr\" target=\"_blank\">roberta-forwardUt</a></strong> to <a href=\"https://wandb.ai/qftie/cped-emo-cls\" target=\"_blank\">Weights & Biases</a> (<a href=\"https://docs.wandb.com/integrations/jupyter.html\" target=\"_blank\">docs</a>).<br/>\n",
       "\n",
       "                "
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "<button onClick=\"this.nextSibling.style.display='block';this.style.display='none';\">Display W&B run</button><iframe src=\"https://wandb.ai/qftie/cped-emo-cls/runs/1m9wr3tr?jupyter=true\" style=\"border:none;width:100%;height:420px;display:none;\"></iframe>"
      ],
      "text/plain": [
       "<wandb.sdk.wandb_run.Run at 0x7f0283130c70>"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inside my model training code\n",
    "import wandb\n",
    "\n",
    "\n",
    "\n",
    "# NOTE(review): wandb.config is populated *before* wandb.init() is called\n",
    "# below. This relied on wandb 0.12.x pre-init config behaviour; newer wandb\n",
    "# versions expect wandb.init(config={...}) first -- confirm before upgrading.\n",
    "config = wandb.config          # Initialize config\n",
    "config.batch_size = 32          # input batch size for training (default: 64)\n",
    "config.test_batch_size = 26    # input batch size for testing (default: 1000)\n",
    "config.epochs = 5             # number of epochs to train (default: 10)\n",
    "config.lr = 2e-5               # learning rate (default: 0.01)\n",
    "config.momentum = 0.1          # SGD momentum (default: 0.5) \n",
    "config.bert_path = 'hfl/chinese-roberta-wwm-ext'  # HF checkpoint for the encoder\n",
    "config.no_cuda = False         # disables CUDA training\n",
    "config.exam_name = 'roberta-forwardUt'  # run name shown in the wandb UI\n",
    "config.max_seq_len = 512  # character budget for current utterance + forward context\n",
    "\n",
    "# NOTE(review): group is 'bert-cls7' but this notebook's active data cell maps\n",
    "# emotions to 5 classes (see the emo2id cell) -- group name may be stale.\n",
    "wandb.init(project=\"cped-emo-cls\",entity='qftie',group='bert-cls7',name=config.exam_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2022-03-04 17:39:35.245177: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory\n",
      "2022-03-04 17:39:35.245213: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd \n",
    "import numpy as np \n",
    "import json, time\n",
    "from tqdm import tqdm \n",
    "from sklearn.metrics import accuracy_score, classification_report, f1_score\n",
    "import torch\n",
    "from torch.utils.data import Dataset, DataLoader, TensorDataset \n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "# NOTE(review): duplicate torch.utils.data import; RandomSampler and\n",
    "# SequentialSampler are never used below -- this line could be dropped.\n",
    "from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n",
    "from transformers import BertModel, BertConfig, AutoTokenizer, AdamW, get_cosine_schedule_with_warmup\n",
    "import warnings\n",
    "warnings.filterwarnings('ignore')  # silences all warnings globally for the rest of the run\n",
    "\n",
    "bert_path = config.bert_path\n",
    "tokenizer = AutoTokenizer.from_pretrained(bert_path)   # initialize the tokenizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# ## 准备多gpu\n",
    "# from accelerate import Accelerator\n",
    "# accelerator = Accelerator(split_batches=True)\n",
    "# DEVICE = accelerator.device\n",
    "# DEVICE"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 预处理数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # mmmtd版本\n",
    "# data_text = pd.read_csv('/148Dataset/data-tie.qianfeng/CPED/data_with_personality.csv')\n",
    "# data_text['tv'] = [int(s[:2]) for s in data_text['vvid']]\n",
    "# emo2id = {'中性情绪':0, '正向情绪':1, '负向情绪':2}\n",
    "# data_text['Emotion_3'] = [emo2id[x] for x in data_text['情绪(粗粒度)']]\n",
    "\n",
    "\n",
    "# # train_index = data_text['tv']<=26\n",
    "# # valid_index = (data_text['tv']==32) | (data_text['tv']>=39)\n",
    "# # test_index = ((data_text['tv']>=27) & (data_text['tv']<32)) | ((data_text['tv']>32) & (data_text['tv']<=37))\n",
    "\n",
    "# data_text_train = data_text[data_text['tv']<=26]\n",
    "# data_text_valid = data_text[(data_text['tv']==32) | (data_text['tv']>=39)]\n",
    "# data_text_test = data_text[((data_text['tv']>=27) & (data_text['tv']<32)) | ((data_text['tv']>32) & (data_text['tv']<=37))]\n",
    "\n",
    "# # data_text_train = data_text_train.sample(frac=1, random_state=2021)\n",
    "# # data_text_valid = data_text_valid.sample(frac=1, random_state=2021)\n",
    "# # data_text_test = data_text_test.sample(frac=1, random_state=2021)\n",
    "\n",
    "# x_train = data_text_train['说话内容'].tolist()\n",
    "# train_label = data_text_train['Emotion_3'].tolist()\n",
    "\n",
    "# x_valid = data_text_valid['说话内容'].tolist()\n",
    "# valid_label = data_text_valid['Emotion_3'].tolist()\n",
    "\n",
    "# x_test = data_text_test['说话内容'].tolist()\n",
    "# test_label = data_text_test['Emotion_3'].tolist()\n",
    "\n",
    "# train_encoding = tokenizer(x_train, truncation=True, padding=True, max_length=120)\n",
    "# # max_length对文本做截断\n",
    "# valid_encoding = tokenizer(x_valid, truncation=True, padding=True, max_length=120)\n",
    "# test_encoding = tokenizer(x_test, truncation=True, padding=True, max_length=120)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # CPED版本数据集，使用中性，正向，负向三分类时将这个block取消注释，并修改classes为3\n",
    "# # data_text = pd.read_csv('/148Dataset/data-tie.qianfeng/CPED/data_with_personality.csv')\n",
    "# # data_text['tv'] = [int(s[:2]) for s in data_text['vvid']]\n",
    "# emo2id = {'neutral':0, 'positive':1, 'negative':2}\n",
    "# # data_text['Emotion_3'] = [emo2id[x] for x in data_text['Sentiment']]\n",
    "\n",
    "# # train_index = data_text['tv']<=26\n",
    "# # valid_index = (data_text['tv']==32) | (data_text['tv']>=39)\n",
    "# # test_index = ((data_text['tv']>=27) & (data_text['tv']<32)) | ((data_text['tv']>32) & (data_text['tv']<=37))\n",
    "\n",
    "# data_text_train = pd.read_csv('/148Dataset/Dataset/CPED/train_split.csv')\n",
    "# data_text_valid = pd.read_csv('/148Dataset/Dataset/CPED/valid_split.csv')\n",
    "# data_text_test = pd.read_csv('/148Dataset/Dataset/CPED/test_split.csv')\n",
    "\n",
    "# # data_text_train = data_text_train.sample(frac=1, random_state=2021)\n",
    "# # data_text_valid = data_text_valid.sample(frac=1, random_state=2021)\n",
    "# # data_text_test = data_text_test.sample(frac=1, random_state=2021)\n",
    "\n",
    "# x_train = data_text_train['Utterance'].tolist()\n",
    "# train_label = [emo2id[x] for x in data_text_train['Sentiment']]\n",
    "\n",
    "# x_valid = data_text_valid['Utterance'].tolist()\n",
    "# valid_label = [emo2id[x] for x in data_text_valid['Sentiment']]\n",
    "\n",
    "# x_test = data_text_test['Utterance'].tolist()\n",
    "# test_label = [emo2id[x] for x in data_text_test['Sentiment']]\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # CPED版本数据集，使用13分类映射后的7分类时，将这个block取消注释，并修改classes为7\n",
    "# # data_text = pd.read_csv('/148Dataset/data-tie.qianfeng/CPED/data_with_personality.csv')\n",
    "# # data_text['tv'] = [int(s[:2]) for s in data_text['vvid']]\n",
    "# emo2id = {'happy':0,'grateful':0, 'relaxed':0, 'positive-other':0, 'neutral':1, 'anger':2, 'sadness':3, 'depress':3, 'fear':4, 'worried':4, 'astonished':5, 'disgust':6, 'negative-other':6}\n",
    "# # data_text['Emotion_3'] = [emo2id[x] for x in data_text['Sentiment']]\n",
    "\n",
    "# # train_index = data_text['tv']<=26\n",
    "# # valid_index = (data_text['tv']==32) | (data_text['tv']>=39)\n",
    "# # test_index = ((data_text['tv']>=27) & (data_text['tv']<32)) | ((data_text['tv']>32) & (data_text['tv']<=37))\n",
    "\n",
    "# data_text_train = pd.read_csv('/148Dataset/Dataset/CPED/train_split.csv')\n",
    "# data_text_valid = pd.read_csv('/148Dataset/Dataset/CPED/valid_split.csv')\n",
    "# data_text_test = pd.read_csv('/148Dataset/Dataset/CPED/test_split.csv')\n",
    "\n",
    "# # data_text_train = data_text_train.sample(frac=1, random_state=2021)\n",
    "# # data_text_valid = data_text_valid.sample(frac=1, random_state=2021)\n",
    "# # data_text_test = data_text_test.sample(frac=1, random_state=2021)\n",
    "\n",
    "# x_train = data_text_train['Utterance'].tolist()\n",
    "# train_label = [emo2id[x] for x in data_text_train['Emotion']]\n",
    "\n",
    "# x_valid = data_text_valid['Utterance'].tolist()\n",
    "# valid_label = [emo2id[x] for x in data_text_valid['Emotion']]\n",
    "\n",
    "# x_test = data_text_test['Utterance'].tolist()\n",
    "# test_label = [emo2id[x] for x in data_text_test['Emotion']]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# CPED dataset: map the 13 fine-grained emotion labels down to 5 classes.\n",
    "# When using this block, set classes=5 in Bert_Model to match.\n",
    "\n",
    "emo2id = {'happy':0,'grateful':0, 'relaxed':0, 'positive-other':0, 'neutral':1, 'anger':2, 'sadness':3, 'depress':3, 'fear':4, 'worried':4, 'astonished':4, 'disgust':2, 'negative-other':3}\n",
    "\n",
    "\n",
    "# NOTE(review): absolute server-local paths; parameterize via a DATA_DIR\n",
    "# constant if this notebook needs to run elsewhere.\n",
    "data_text_train = pd.read_csv('/148Dataset/Dataset/CPED/train_split.csv')\n",
    "data_text_valid = pd.read_csv('/148Dataset/Dataset/CPED/valid_split.csv')\n",
    "data_text_test = pd.read_csv('/148Dataset/Dataset/CPED/test_split.csv')\n",
    "\n",
    "x_train = data_text_train['Utterance'].tolist()\n",
    "train_label = [emo2id[x] for x in data_text_train['Emotion']]\n",
    "\n",
    "x_valid = data_text_valid['Utterance'].tolist()\n",
    "valid_label = [emo2id[x] for x in data_text_valid['Emotion']]\n",
    "\n",
    "x_test = data_text_test['Utterance'].tolist()\n",
    "test_label = [emo2id[x] for x in data_text_test['Emotion']]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 处理audio特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load utterance-level audio features (per the filename: IS13 ComParE set,\n",
    "# PCA-reduced to 100 dims) and split them by TV-series number.\n",
    "data_audio = pd.read_csv('/148Dataset/data-tie.qianfeng/CPED/Features/MMMTD_audio_IS13_ComParE_PCA100.csv')\n",
    "data_audio['Utterance_ID'] = [s[1:-1] for s in data_audio['vvid']] # strip the first/last character of vvid to recover the Utterance_ID\n",
    "data_audio['tv'] = [int(s[:2]) for s in data_audio['Utterance_ID']] # first two digits = TV-series number\n",
    "train_index = data_audio['tv']<=26\n",
    "valid_index = (data_audio['tv']==32) | (data_audio['tv']>=39)\n",
    "test_index = ((data_audio['tv']>=27) & (data_audio['tv']<32)) | ((data_audio['tv']>32) & (data_audio['tv']<=37))\n",
    "\n",
    "# Keep only the feature columns: drop column 0 (vvid) and the two helper\n",
    "# columns appended above.\n",
    "# NOTE(review): this assumes the filtered audio rows line up 1:1, in order,\n",
    "# with the rows of the text train/valid/test splits -- verify the alignment.\n",
    "audio_train = data_audio[train_index].values[:,1:-2].tolist()\n",
    "audio_valid = data_audio[valid_index].values[:,1:-2].tolist()\n",
    "audio_test = data_audio[test_index].values[:,1:-2].tolist()\n",
    "\n",
    "# train_audio = [data_audio.query(\"Utterance_ID == @ut\").values.tolist()[0][1:-2] for ut in data_text_train['Utterance_ID']]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# data_audio['Utterance_ID'][1:30]\n",
    "# data_audio[data_audio['Utterance_ID']=='01_000_002']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# data_audio.query(\"Utterance_ID == '01_000_002'\").values.tolist()[0][1:-1]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 准备forward utterance作为sentence1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "# back_stride = int(data_text_valid['Utterance_ID'][50][-3:])\n",
    "# ' '.join(data_text_valid['Utterance'][ 50- back_stride: 50].tolist())\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "max_seq_len = config.max_seq_len\n",
    "\n",
    "def build_forward_utterances(df, max_seq_len):\n",
    "    \"\"\"Collect the forward (preceding) dialogue context for every row of df.\n",
    "\n",
    "    The last three digits of Utterance_ID encode the utterance's position\n",
    "    inside its clip, so rows [i - pos, i) are the utterances spoken before\n",
    "    row i. The '.'-joined context is right-truncated so that context plus\n",
    "    the current utterance stays within max_seq_len characters.\n",
    "    \"\"\"\n",
    "    forward_utts = []\n",
    "    for i in range(len(df)):\n",
    "        back_stride = int(df['Utterance_ID'][i][-3:])\n",
    "        utt_forward = '.'.join(df['Utterance'][i - back_stride: i].tolist())\n",
    "        max_forward_len = max_seq_len - len(df['Utterance'][i])\n",
    "        forward_utts.append(utt_forward[-max_forward_len:])\n",
    "    return forward_utts\n",
    "\n",
    "# Previously three copy-pasted loops; a single helper keeps them in sync.\n",
    "utt_forward_train = build_forward_utterances(data_text_train, max_seq_len)\n",
    "utt_forward_valid = build_forward_utterances(data_text_valid, max_seq_len)\n",
    "utt_forward_test = build_forward_utterances(data_text_test, max_seq_len)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['你是不是真的不知道现在高几了',\n",
       " '你不知道我告诉你',\n",
       " '你现在高三了',\n",
       " '不再是高一高二了',\n",
       " '你说',\n",
       " '你说你 天天天天',\n",
       " '学习不灵，打架门清，一个暑假我给你报个补习班',\n",
       " '原指望你好好学习能上去',\n",
       " '结果断崖式的下滑',\n",
       " '你对得起我吗']"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x_train[30:40]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['妈.你不要叫我妈.我不是你妈.妈，我错了.你没错.我错了.我为什么要生你.我吃饱了撑的我就不该生你.方一凡我真的不明白你脑子里面天天在想什么',\n",
       " '妈.你不要叫我妈.我不是你妈.妈，我错了.你没错.我错了.我为什么要生你.我吃饱了撑的我就不该生你.方一凡我真的不明白你脑子里面天天在想什么.你是不是真的不知道现在高几了',\n",
       " '妈.你不要叫我妈.我不是你妈.妈，我错了.你没错.我错了.我为什么要生你.我吃饱了撑的我就不该生你.方一凡我真的不明白你脑子里面天天在想什么.你是不是真的不知道现在高几了.你不知道我告诉你',\n",
       " '妈.你不要叫我妈.我不是你妈.妈，我错了.你没错.我错了.我为什么要生你.我吃饱了撑的我就不该生你.方一凡我真的不明白你脑子里面天天在想什么.你是不是真的不知道现在高几了.你不知道我告诉你.你现在高三了',\n",
       " '妈.你不要叫我妈.我不是你妈.妈，我错了.你没错.我错了.我为什么要生你.我吃饱了撑的我就不该生你.方一凡我真的不明白你脑子里面天天在想什么.你是不是真的不知道现在高几了.你不知道我告诉你.你现在高三了.不再是高一高二了',\n",
       " '妈.你不要叫我妈.我不是你妈.妈，我错了.你没错.我错了.我为什么要生你.我吃饱了撑的我就不该生你.方一凡我真的不明白你脑子里面天天在想什么.你是不是真的不知道现在高几了.你不知道我告诉你.你现在高三了.不再是高一高二了.你说',\n",
       " '妈.你不要叫我妈.我不是你妈.妈，我错了.你没错.我错了.我为什么要生你.我吃饱了撑的我就不该生你.方一凡我真的不明白你脑子里面天天在想什么.你是不是真的不知道现在高几了.你不知道我告诉你.你现在高三了.不再是高一高二了.你说.你说你 天天天天',\n",
       " '妈.你不要叫我妈.我不是你妈.妈，我错了.你没错.我错了.我为什么要生你.我吃饱了撑的我就不该生你.方一凡我真的不明白你脑子里面天天在想什么.你是不是真的不知道现在高几了.你不知道我告诉你.你现在高三了.不再是高一高二了.你说.你说你 天天天天.学习不灵，打架门清，一个暑假我给你报个补习班',\n",
       " '妈.你不要叫我妈.我不是你妈.妈，我错了.你没错.我错了.我为什么要生你.我吃饱了撑的我就不该生你.方一凡我真的不明白你脑子里面天天在想什么.你是不是真的不知道现在高几了.你不知道我告诉你.你现在高三了.不再是高一高二了.你说.你说你 天天天天.学习不灵，打架门清，一个暑假我给你报个补习班.原指望你好好学习能上去',\n",
       " '妈.你不要叫我妈.我不是你妈.妈，我错了.你没错.我错了.我为什么要生你.我吃饱了撑的我就不该生你.方一凡我真的不明白你脑子里面天天在想什么.你是不是真的不知道现在高几了.你不知道我告诉你.你现在高三了.不再是高一高二了.你说.你说你 天天天天.学习不灵，打架门清，一个暑假我给你报个补习班.原指望你好好学习能上去.结果断崖式的下滑']"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "utt_forward_train[30:40]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "# print(x_valid_forward_utt)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## tokenize"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'input_ids': [[101, 872, 3221, 6443, 102, 872, 3221, 6443, 1557, 102], [101, 2769, 6432, 102, 2769, 6432, 872, 102, 0, 0], [101, 2523, 2487, 102, 2523, 2487, 102, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [0, 0, 0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 1, 1, 1, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0]]}"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tokenizer(['你是谁','我说','很强'],['你是谁啊','我说你','很强'],padding=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Encode (current utterance, forward context) pairs. Use config.max_seq_len\n",
    "# (512) instead of a hard-coded 512 so truncation stays in sync with the\n",
    "# character budget used when building utt_forward_* above.\n",
    "train_encoding = tokenizer(x_train, utt_forward_train, truncation=True, padding=True, max_length=config.max_seq_len)\n",
    "valid_encoding = tokenizer(x_valid, utt_forward_valid, truncation=True, padding=True, max_length=config.max_seq_len)\n",
    "test_encoding = tokenizer(x_test, utt_forward_test, truncation=True, padding=True, max_length=config.max_seq_len)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 准备训练集，验证集，测试集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Wrap tokenizer encodings, audio features and labels into a torch Dataset\n",
    "# that yields one dict of tensors per sample.\n",
    "class NewsDataset(Dataset):\n",
    "    def __init__(self, encodings, audio_features, labels):\n",
    "        self.encodings = encodings\n",
    "        self.labels = labels\n",
    "        self.af = audio_features\n",
    "    \n",
    "    # Fetch a single sample: the encoding fields (input_ids, token_type_ids,\n",
    "    # attention_mask) plus 'audio_features' and the integer 'labels'.\n",
    "    def __getitem__(self, idx):\n",
    "        item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}\n",
    "        item['audio_features'] = torch.tensor(self.af[idx])\n",
    "        item['labels'] = torch.tensor(int(self.labels[idx]))\n",
    "        return item\n",
    "    \n",
    "    def __len__(self):\n",
    "        return len(self.labels)\n",
    "\n",
    "train_dataset = NewsDataset(train_encoding, audio_train, train_label)\n",
    "valid_dataset = NewsDataset(valid_encoding, audio_valid, valid_label)\n",
    "test_dataset = NewsDataset(test_encoding, audio_test, test_label)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## WeightedRandomSampler"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "# labels = [train_dataset[i]['labels'] for i in range(len(train_dataset))]\n",
    "# # labels[20:30]\n",
    "# # train_label[20:30]\n",
    "# weights = [1 if label == 2 else 2 for label in labels]\n",
    "# # len(train_dataset)\n",
    "# # weights[20:30]\n",
    "# from torch.utils.data.sampler import  WeightedRandomSampler\n",
    "# sampler = WeightedRandomSampler(weights,\\\n",
    "#                                 num_samples=70000,\\\n",
    "#                                 replacement=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 加载到torch的dataloader"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Batch loaders. Only the training set needs shuffling; evaluation metrics\n",
    "# are order-independent, so valid/test loaders are kept deterministic\n",
    "# (previously shuffle=True, which added pointless nondeterminism).\n",
    "train_loader = DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True)\n",
    "\n",
    "# train_loader = DataLoader(train_dataset, batch_size=config.batch_size, sampler=sampler)\n",
    "valid_loader = DataLoader(valid_dataset, batch_size=config.batch_size, shuffle=False)\n",
    "test_loader = DataLoader(test_dataset, batch_size=config.batch_size, shuffle=False)\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 定义bert模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# BERT encoder + audio-feature fusion classifier.\n",
    "class Bert_Model(nn.Module):\n",
    "    def __init__(self, bert_path, classes=5):\n",
    "        super(Bert_Model, self).__init__()\n",
    "        self.config = BertConfig.from_pretrained(bert_path)  # load model hyperparameters\n",
    "        self.bert = BertModel.from_pretrained(bert_path)     # load pretrained weights\n",
    "        # NOTE(review): self.fc, self.dense and self.activation are never used\n",
    "        # in forward() below (only fc1/fc2 are) -- likely leftovers from the\n",
    "        # commented-out fusion variant.\n",
    "        self.fc = nn.Linear(self.config.hidden_size+100, classes)  # direct classification head (unused)\n",
    "        self.fc1 = nn.Linear(self.config.hidden_size, 100)\n",
    "        self.fc2 = nn.Linear(200, classes)\n",
    "        self.dense = nn.Linear(self.config.hidden_size, self.config.hidden_size)\n",
    "        self.activation = nn.Tanh()\n",
    "\n",
    "        \n",
    "        \n",
    "    def forward(self, input_ids, attention_mask=None, token_type_ids=None, audio_features=None):\n",
    "        outputs = self.bert(input_ids, attention_mask, token_type_ids)\n",
    "\n",
    "        # out_pool = torch.hstack((outputs[1],audio_features))   # 池化后的输出 [bs, config.hidden_size]\n",
    "        # # out_pool = self.dense(out_pool)\n",
    "        # # out_pool = self.activation(out_pool)\n",
    "        # logit = self.fc(out_pool)   #  [bs, classes]\n",
    "\n",
    "        # Project the pooled output (outputs[1]) to 100-d, concatenate with the\n",
    "        # audio features (must be 100-d, since fc2 takes 200), then classify.\n",
    "        out_pool = self.fc1(outputs[1])\n",
    "        out_pool = torch.hstack((out_pool,audio_features)) \n",
    "        logit = self.fc2(out_pool)\n",
    "        return logit"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 实例化bert模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of the model checkpoint at hfl/chinese-roberta-wwm-ext were not used when initializing BertModel: ['cls.predictions.transform.dense.bias', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.decoder.weight', 'cls.seq_relationship.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.transform.LayerNorm.weight', 'cls.seq_relationship.weight', 'cls.predictions.bias']\n",
      "- This IS expected if you are initializing BertModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing BertModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Total parameters: 102940490, Trainable parameters: 102940490\n"
     ]
    }
   ],
   "source": [
    "def get_parameter_number(model):\n",
    "    #  Print (return) the model's total and trainable parameter counts.\n",
    "    total_num = sum(p.numel() for p in model.parameters())\n",
    "    trainable_num = sum(p.numel() for p in model.parameters() if p.requires_grad)\n",
    "    return 'Total parameters: {}, Trainable parameters: {}'.format(total_num, trainable_num)\n",
    "\n",
    "DEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "EPOCHS = config.epochs\n",
    "model = Bert_Model(bert_path)  # classes defaults to 5, matching the active 5-way emo2id mapping\n",
    "model = nn.DataParallel(model)  # replicate across the GPUs exposed via CUDA_VISIBLE_DEVICES\n",
    "model = model.cuda()\n",
    "print(get_parameter_number(model))\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 优化器定义"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [],
   "source": [
    "optimizer = AdamW(model.parameters(), lr=config.lr, weight_decay=1e-4) # AdamW optimizer\n",
    "scheduler = get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=0.3*len(train_loader),\n",
    "                                            num_training_steps=EPOCHS*len(train_loader))\n",
    "# LR warms up linearly for the first 0.3 epoch, then follows a cosine decay.\n",
    "# Tip: keep the warmup (LR ramps up from 0); without it training may fail to converge."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # 准备放入多卡环境\n",
    "# model, optimizer, train_loader = accelerator.prepare(model, optimizer, train_loader)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 定义训练函数和验证测试函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Evaluate on a labelled loader; prints a classification report and returns\n",
    "# (accuracy, mean loss, macro-F1).\n",
    "def evaluate(model, data_loader, device):\n",
    "    model.eval()\n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    val_true, val_pred = [], []\n",
    "    valid_loss_sum = 0.0\n",
    "    with torch.no_grad():\n",
    "        for idx, batch in enumerate(data_loader):\n",
    "            y_pred = model(batch['input_ids'].to(device), batch['attention_mask'].to(device), batch['token_type_ids'].to(device), batch['audio_features'].to(device))\n",
    "            loss = criterion(y_pred, batch['labels'].to(device))\n",
    "            y_pred = torch.argmax(y_pred, dim=1).detach().cpu().numpy().tolist()\n",
    "            val_pred.extend(y_pred)\n",
    "            val_true.extend(batch['labels'].cpu().numpy().tolist())\n",
    "            valid_loss_sum += loss.item()\n",
    "\n",
    "    print(classification_report(val_true, val_pred, digits=4))\n",
    "    return accuracy_score(val_true, val_pred), valid_loss_sum/len(data_loader), f1_score(val_true, val_pred, average='macro')\n",
    "\n",
    "\n",
    "# Predict labels for an unlabelled loader (e.g. the submission set).\n",
    "def predict(model, data_loader, device):\n",
    "    model.eval()\n",
    "    val_pred = []\n",
    "    with torch.no_grad():\n",
    "        for batch in data_loader:\n",
    "            y_pred = model(batch['input_ids'].to(device), batch['attention_mask'].to(device), batch['token_type_ids'].to(device), batch['audio_features'].to(device))\n",
    "            y_pred = torch.argmax(y_pred, dim=1).detach().cpu().numpy().tolist()\n",
    "            val_pred.extend(y_pred)\n",
    "    return val_pred\n",
    "\n",
    "\n",
    "# Train for `epoch` epochs, validating after each one; saves the weights with\n",
    "# the lowest validation loss to best_bert_model.bin.\n",
    "def train_and_eval(model, train_loader, valid_loader, \n",
    "                   optimizer, scheduler, device, epoch):\n",
    "    best_acc = 0.0\n",
    "    best_loss = 100\n",
    "    best_macro_f1 = 0\n",
    "    b = 0.6  # flood level: loss = |loss - b| + b keeps the training loss from sinking below b\n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    for i in range(epoch):\n",
    "        # ---- training ----\n",
    "        start = time.time()\n",
    "        model.train()\n",
    "        print(\"***** Running training epoch {} *****\".format(i+1))\n",
    "        train_loss_sum = 0.0\n",
    "        for idx, batch in enumerate(train_loader):\n",
    "            ids = batch['input_ids'].to(device)\n",
    "            att = batch['attention_mask'].to(device)\n",
    "            tpe = batch['token_type_ids'].to(device)\n",
    "            af = batch['audio_features'].to(device)\n",
    "            y = batch['labels'].to(device)\n",
    "            y_pred = model(ids, att, tpe, af)\n",
    "            loss = criterion(y_pred, y)\n",
    "            loss = (loss - b).abs() + b  # apply flooding\n",
    "            step_lr = np.array([param_group[\"lr\"] for param_group in optimizer.param_groups]).mean()\n",
    "            optimizer.zero_grad()\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "            scheduler.step()   # advance the LR schedule every step\n",
    "\n",
    "            train_loss_sum += loss.item()\n",
    "            if (idx + 1) % (len(train_loader)//20) == 0:    # log ~20 times per epoch\n",
    "                wandb.log({\n",
    "                            'Epoch': i+1,\n",
    "                            'train_loss': loss.item(),  # log a float, not the tensor\n",
    "                            'lr': step_lr\n",
    "                            })\n",
    "                print(\"Epoch {:04d} | Step {:04d}/{:04d} | Loss {:.4f} | Time {:.4f}\".format(\n",
    "                          i+1, idx+1, len(train_loader), train_loss_sum/(idx+1), time.time() - start))\n",
    "\n",
    "        # ---- validation ----\n",
    "        model.eval()\n",
    "        acc, valid_loss, valid_macro_f1 = evaluate(model, valid_loader, device)\n",
    "        wandb.log({'valid_acc': acc, 'valid_loss': valid_loss, 'valid_macro_f1': valid_macro_f1})\n",
    "        # Keep the checkpoint with the lowest validation loss.\n",
    "        if valid_loss < best_loss:\n",
    "            best_loss = valid_loss\n",
    "            torch.save(model.state_dict(), \"best_bert_model.bin\")\n",
    "\n",
    "        if acc > best_acc:\n",
    "            best_acc = acc\n",
    "        if valid_macro_f1 > best_macro_f1:\n",
    "            best_macro_f1 = valid_macro_f1\n",
    "\n",
    "        # BUG FIX: this print previously passed (acc, best_acc) while labelling\n",
    "        # them macro_f1, so the reported numbers were actually accuracies.\n",
    "        print(\"current macro_f1 is {:.4f}, best macro_f1 is {:.4f}\".format(valid_macro_f1, best_macro_f1))\n",
    "        print(\"time costed = {}s \\n\".format(round(time.time() - start, 5)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 训练和验证模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "***** Running training epoch 1 *****\n",
      "Epoch 0001 | Step 0147/2944 | Loss 2.9292 | Time 104.7880\n",
      "Epoch 0001 | Step 0294/2944 | Loss 2.8011 | Time 202.8490\n",
      "Epoch 0001 | Step 0441/2944 | Loss 2.7528 | Time 298.9002\n",
      "Epoch 0001 | Step 0588/2944 | Loss 2.6951 | Time 398.6915\n",
      "Epoch 0001 | Step 0735/2944 | Loss 2.6501 | Time 496.6541\n",
      "Epoch 0001 | Step 0882/2944 | Loss 2.6013 | Time 593.5265\n",
      "Epoch 0001 | Step 1029/2944 | Loss 2.5602 | Time 691.4713\n",
      "Epoch 0001 | Step 1176/2944 | Loss 2.5146 | Time 789.7409\n",
      "Epoch 0001 | Step 1323/2944 | Loss 2.4737 | Time 885.3186\n",
      "Epoch 0001 | Step 1470/2944 | Loss 2.4390 | Time 983.0337\n",
      "Epoch 0001 | Step 1617/2944 | Loss 2.4018 | Time 1081.0486\n",
      "Epoch 0001 | Step 1764/2944 | Loss 2.3641 | Time 1178.8110\n",
      "Epoch 0001 | Step 1911/2944 | Loss 2.3325 | Time 1274.5034\n",
      "Epoch 0001 | Step 2058/2944 | Loss 2.2980 | Time 1372.1018\n",
      "Epoch 0001 | Step 2205/2944 | Loss 2.2643 | Time 1467.8702\n",
      "Epoch 0001 | Step 2352/2944 | Loss 2.2316 | Time 1565.9755\n",
      "Epoch 0001 | Step 2499/2944 | Loss 2.2007 | Time 1663.4229\n",
      "Epoch 0001 | Step 2646/2944 | Loss 2.1718 | Time 1760.8096\n",
      "Epoch 0001 | Step 2793/2944 | Loss 2.1461 | Time 1856.5781\n",
      "Epoch 0001 | Step 2940/2944 | Loss 2.1191 | Time 1954.9102\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0     0.2791    0.3528    0.3117      1692\n",
      "           1     0.3802    0.5429    0.4472      3126\n",
      "           2     0.4262    0.4474    0.4365      1806\n",
      "           3     0.4191    0.2461    0.3101      3422\n",
      "           4     0.2397    0.1384    0.1755      1091\n",
      "\n",
      "    accuracy                         0.3677     11137\n",
      "   macro avg     0.3489    0.3455    0.3362     11137\n",
      "weighted avg     0.3705    0.3677    0.3561     11137\n",
      "\n",
      "current macro_f1 is 0.3677, best macro_f1 is 0.0000\n",
      "time costed = 2040.87089s \n",
      "\n",
      "***** Running training epoch 2 *****\n",
      "Epoch 0002 | Step 0147/2944 | Loss 1.3933 | Time 97.3423\n",
      "Epoch 0002 | Step 0294/2944 | Loss 1.3842 | Time 193.6038\n",
      "Epoch 0002 | Step 0441/2944 | Loss 1.3764 | Time 291.4445\n",
      "Epoch 0002 | Step 0588/2944 | Loss 1.3650 | Time 389.4090\n",
      "Epoch 0002 | Step 0735/2944 | Loss 1.3559 | Time 485.2343\n",
      "Epoch 0002 | Step 0882/2944 | Loss 1.3473 | Time 582.8619\n",
      "Epoch 0002 | Step 1029/2944 | Loss 1.3396 | Time 680.9982\n",
      "Epoch 0002 | Step 1176/2944 | Loss 1.3262 | Time 776.7945\n",
      "Epoch 0002 | Step 1323/2944 | Loss 1.3152 | Time 874.6039\n",
      "Epoch 0002 | Step 1470/2944 | Loss 1.3062 | Time 972.4587\n",
      "Epoch 0002 | Step 1617/2944 | Loss 1.2952 | Time 1068.0157\n",
      "Epoch 0002 | Step 1764/2944 | Loss 1.2850 | Time 1166.8159\n",
      "Epoch 0002 | Step 1911/2944 | Loss 1.2765 | Time 1264.4546\n",
      "Epoch 0002 | Step 2058/2944 | Loss 1.2682 | Time 1360.8904\n",
      "Epoch 0002 | Step 2205/2944 | Loss 1.2599 | Time 1458.2154\n",
      "Epoch 0002 | Step 2352/2944 | Loss 1.2538 | Time 1556.1608\n",
      "Epoch 0002 | Step 2499/2944 | Loss 1.2485 | Time 1651.7908\n",
      "Epoch 0002 | Step 2646/2944 | Loss 1.2431 | Time 1749.1648\n",
      "Epoch 0002 | Step 2793/2944 | Loss 1.2353 | Time 1848.0039\n",
      "Epoch 0002 | Step 2940/2944 | Loss 1.2284 | Time 1943.5694\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0     0.3535    0.2931    0.3205      1692\n",
      "           1     0.4249    0.4555    0.4397      3126\n",
      "           2     0.4269    0.5028    0.4617      1806\n",
      "           3     0.4096    0.4106    0.4101      3422\n",
      "           4     0.2458    0.1861    0.2118      1091\n",
      "\n",
      "    accuracy                         0.3983     11137\n",
      "   macro avg     0.3722    0.3696    0.3688     11137\n",
      "weighted avg     0.3921    0.3983    0.3937     11137\n",
      "\n",
      "current macro_f1 is 0.3983, best macro_f1 is 0.0000\n",
      "time costed = 2033.32819s \n",
      "\n",
      "***** Running training epoch 3 *****\n",
      "Epoch 0003 | Step 0147/2944 | Loss 0.9030 | Time 97.3447\n",
      "Epoch 0003 | Step 0294/2944 | Loss 0.8887 | Time 192.8843\n",
      "Epoch 0003 | Step 0441/2944 | Loss 0.8874 | Time 290.0981\n",
      "Epoch 0003 | Step 0588/2944 | Loss 0.8861 | Time 385.7309\n",
      "Epoch 0003 | Step 0735/2944 | Loss 0.8834 | Time 483.0600\n",
      "Epoch 0003 | Step 0882/2944 | Loss 0.8861 | Time 580.6140\n",
      "Epoch 0003 | Step 1029/2944 | Loss 0.8859 | Time 679.0565\n",
      "Epoch 0003 | Step 1176/2944 | Loss 0.8852 | Time 774.5216\n",
      "Epoch 0003 | Step 1323/2944 | Loss 0.8856 | Time 871.7993\n",
      "Epoch 0003 | Step 1470/2944 | Loss 0.8869 | Time 967.1197\n",
      "Epoch 0003 | Step 1617/2944 | Loss 0.8874 | Time 1064.3726\n",
      "Epoch 0003 | Step 1764/2944 | Loss 0.8882 | Time 1161.6651\n",
      "Epoch 0003 | Step 1911/2944 | Loss 0.8875 | Time 1259.1442\n",
      "Epoch 0003 | Step 2058/2944 | Loss 0.8853 | Time 1354.8820\n",
      "Epoch 0003 | Step 2205/2944 | Loss 0.8854 | Time 1451.9527\n",
      "Epoch 0003 | Step 2352/2944 | Loss 0.8839 | Time 1549.1722\n",
      "Epoch 0003 | Step 2499/2944 | Loss 0.8849 | Time 1644.6154\n",
      "Epoch 0003 | Step 2646/2944 | Loss 0.8815 | Time 1741.7469\n",
      "Epoch 0003 | Step 2793/2944 | Loss 0.8799 | Time 1840.3565\n",
      "Epoch 0003 | Step 2940/2944 | Loss 0.8780 | Time 1935.9184\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0     0.3356    0.3511    0.3432      1692\n",
      "           1     0.4218    0.4981    0.4568      3126\n",
      "           2     0.4098    0.4817    0.4429      1806\n",
      "           3     0.4278    0.3177    0.3646      3422\n",
      "           4     0.2500    0.2319    0.2406      1091\n",
      "\n",
      "    accuracy                         0.3916     11137\n",
      "   macro avg     0.3690    0.3761    0.3696     11137\n",
      "weighted avg     0.3918    0.3916    0.3878     11137\n",
      "\n",
      "current macro_f1 is 0.3916, best macro_f1 is 0.0000\n",
      "time costed = 2019.35632s \n",
      "\n",
      "***** Running training epoch 4 *****\n",
      "Epoch 0004 | Step 0147/2944 | Loss 0.7414 | Time 97.3343\n",
      "Epoch 0004 | Step 0294/2944 | Loss 0.7431 | Time 195.0655\n",
      "Epoch 0004 | Step 0441/2944 | Loss 0.7505 | Time 290.4677\n",
      "Epoch 0004 | Step 0588/2944 | Loss 0.7472 | Time 387.4453\n",
      "Epoch 0004 | Step 0735/2944 | Loss 0.7459 | Time 484.8053\n",
      "Epoch 0004 | Step 0882/2944 | Loss 0.7479 | Time 581.9153\n",
      "Epoch 0004 | Step 1029/2944 | Loss 0.7499 | Time 677.0534\n",
      "Epoch 0004 | Step 1176/2944 | Loss 0.7518 | Time 774.6022\n",
      "Epoch 0004 | Step 1323/2944 | Loss 0.7516 | Time 870.1693\n",
      "Epoch 0004 | Step 1470/2944 | Loss 0.7501 | Time 968.3885\n",
      "Epoch 0004 | Step 1617/2944 | Loss 0.7508 | Time 1065.7808\n",
      "Epoch 0004 | Step 1764/2944 | Loss 0.7497 | Time 1163.7553\n",
      "Epoch 0004 | Step 1911/2944 | Loss 0.7511 | Time 1259.2276\n",
      "Epoch 0004 | Step 2058/2944 | Loss 0.7552 | Time 1356.4497\n",
      "Epoch 0004 | Step 2205/2944 | Loss 0.7557 | Time 1451.8349\n",
      "Epoch 0004 | Step 2352/2944 | Loss 0.7551 | Time 1549.2693\n",
      "Epoch 0004 | Step 2499/2944 | Loss 0.7553 | Time 1646.8060\n",
      "Epoch 0004 | Step 2646/2944 | Loss 0.7551 | Time 1744.0012\n",
      "Epoch 0004 | Step 2793/2944 | Loss 0.7548 | Time 1839.9030\n",
      "Epoch 0004 | Step 2940/2944 | Loss 0.7541 | Time 1937.0780\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0     0.3347    0.3286    0.3316      1692\n",
      "           1     0.4196    0.4335    0.4264      3126\n",
      "           2     0.4408    0.4225    0.4314      1806\n",
      "           3     0.4017    0.4050    0.4034      3422\n",
      "           4     0.2486    0.2429    0.2457      1091\n",
      "\n",
      "    accuracy                         0.3883     11137\n",
      "   macro avg     0.3691    0.3665    0.3677     11137\n",
      "weighted avg     0.3879    0.3883    0.3881     11137\n",
      "\n",
      "current macro_f1 is 0.3883, best macro_f1 is 0.0000\n",
      "time costed = 2025.8719s \n",
      "\n",
      "***** Running training epoch 5 *****\n",
      "Epoch 0005 | Step 0147/2944 | Loss 0.7331 | Time 97.2581\n",
      "Epoch 0005 | Step 0294/2944 | Loss 0.7372 | Time 192.5648\n",
      "Epoch 0005 | Step 0441/2944 | Loss 0.7317 | Time 290.1821\n",
      "Epoch 0005 | Step 0588/2944 | Loss 0.7274 | Time 387.3610\n",
      "Epoch 0005 | Step 0735/2944 | Loss 0.7350 | Time 482.9392\n",
      "Epoch 0005 | Step 0882/2944 | Loss 0.7329 | Time 580.3862\n",
      "Epoch 0005 | Step 1029/2944 | Loss 0.7307 | Time 678.2592\n",
      "Epoch 0005 | Step 1176/2944 | Loss 0.7293 | Time 773.7487\n",
      "Epoch 0005 | Step 1323/2944 | Loss 0.7292 | Time 871.4635\n",
      "Epoch 0005 | Step 1470/2944 | Loss 0.7276 | Time 970.0019\n",
      "Epoch 0005 | Step 1617/2944 | Loss 0.7268 | Time 1065.4472\n",
      "Epoch 0005 | Step 1764/2944 | Loss 0.7264 | Time 1163.3497\n",
      "Epoch 0005 | Step 1911/2944 | Loss 0.7261 | Time 1260.8558\n",
      "Epoch 0005 | Step 2058/2944 | Loss 0.7293 | Time 1356.3535\n",
      "Epoch 0005 | Step 2205/2944 | Loss 0.7289 | Time 1453.6235\n",
      "Epoch 0005 | Step 2352/2944 | Loss 0.7283 | Time 1550.5741\n",
      "Epoch 0005 | Step 2499/2944 | Loss 0.7281 | Time 1647.8507\n",
      "Epoch 0005 | Step 2646/2944 | Loss 0.7281 | Time 1742.7874\n",
      "Epoch 0005 | Step 2793/2944 | Loss 0.7282 | Time 1839.7196\n",
      "Epoch 0005 | Step 2940/2944 | Loss 0.7281 | Time 1934.8005\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0     0.3365    0.3168    0.3263      1692\n",
      "           1     0.4119    0.4501    0.4301      3126\n",
      "           2     0.4265    0.4480    0.4369      1806\n",
      "           3     0.4036    0.3670    0.3845      3422\n",
      "           4     0.2440    0.2502    0.2471      1091\n",
      "\n",
      "    accuracy                         0.3844     11137\n",
      "   macro avg     0.3645    0.3664    0.3650     11137\n",
      "weighted avg     0.3838    0.3844    0.3835     11137\n",
      "\n",
      "current macro_f1 is 0.3844, best macro_f1 is 0.0000\n",
      "time costed = 2019.87063s \n",
      "\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<br/>Waiting for W&B process to finish, PID 17352... <strong style=\"color:green\">(success).</strong>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "12a1911d22844ee7b5a01c5115a917e8",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "VBox(children=(Label(value=' 0.00MB of 0.00MB uploaded (0.00MB deduped)\\r'), FloatProgress(value=1.0, max=1.0)…"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "<style>\n",
       "    table.wandb td:nth-child(1) { padding: 0 10px; text-align: right }\n",
       "    .wandb-row { display: flex; flex-direction: row; flex-wrap: wrap; width: 100% }\n",
       "    .wandb-col { display: flex; flex-direction: column; flex-basis: 100%; flex: 1; padding: 10px; }\n",
       "    </style>\n",
       "<div class=\"wandb-row\"><div class=\"wandb-col\">\n",
       "<h3>Run history:</h3><br/><table class=\"wandb\"><tr><td>Epoch</td><td>▁▁▁▁▁▁▁▁▃▃▃▃▃▃▃▃▅▅▅▅▅▅▅▅▆▆▆▆▆▆▆▆████████</td></tr><tr><td>lr</td><td>▂▄███████▇▇▇▇▇▆▆▆▆▅▅▅▄▄▄▄▃▃▃▂▂▂▂▂▁▁▁▁▁▁▁</td></tr><tr><td>train_loss</td><td>▇▇█▅▅▅▅▅▅▃▄▄▃▂▃▃▁▂▃▂▁▂▁▁▁▁▁▁▁▁▂▁▂▁▁▂▂▂▁▁</td></tr><tr><td>valid_acc</td><td>▁█▆▆▅</td></tr><tr><td>valid_loss</td><td>█▁▄▁▁</td></tr></table><br/></div><div class=\"wandb-col\">\n",
       "<h3>Run summary:</h3><br/><table class=\"wandb\"><tr><td>Epoch</td><td>5</td></tr><tr><td>lr</td><td>0.0</td></tr><tr><td>train_loss</td><td>0.74977</td></tr><tr><td>valid_acc</td><td>0.38439</td></tr><tr><td>valid_loss</td><td>1.81203</td></tr></table>\n",
       "</div></div>\n",
       "Synced 6 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)\n",
       "<br/>Synced <strong style=\"color:#cdcd00\">roberta-forwardUt</strong>: <a href=\"https://wandb.ai/qftie/cped-emo-cls/runs/1m9wr3tr\" target=\"_blank\">https://wandb.ai/qftie/cped-emo-cls/runs/1m9wr3tr</a><br/>\n",
       "Find logs at: <code>./wandb/run-20220304_173926-1m9wr3tr/logs</code><br/>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# 训练和验证评估\n",
    "train_and_eval(model, train_loader, valid_loader, optimizer, scheduler, DEVICE, EPOCHS)\n",
    "wandb.finish()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 加载最优模型测试"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0     0.3853    0.4760    0.4259      6470\n",
      "           1     0.4109    0.4211    0.4159      7991\n",
      "           2     0.3388    0.4186    0.3745      3466\n",
      "           3     0.3125    0.2785    0.2945      5717\n",
      "           4     0.2994    0.1481    0.1982      3794\n",
      "\n",
      "    accuracy                         0.3663     27438\n",
      "   macro avg     0.3494    0.3485    0.3418     27438\n",
      "weighted avg     0.3598    0.3663    0.3576     27438\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# 加载最优权重对测试集测试\n",
    "model.load_state_dict(torch.load(\"best_bert_model.bin\"))\n",
    "test_acc, test_loss, test_macro_f1 = evaluate(model, test_loader, DEVICE)  # evaluate returns (acc, loss, macro_f1), not predictions\n",
    "# print(\"\\n Test Accuracy = {} \\n\".format(accuracy_score(test_label, pred_test)))\n",
    "# print(classification_report(test_label, pred_test, digits=4))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [],
   "source": [
    "# output_dir = 'output/ch-roberta-forwardUt'\n",
    "# os.makedirs(output_dir, exist_ok=True)\n",
    "# torch.save(model.state_dict(), output_dir+\"/pytorch_model.bin\")\n",
    "# # torch.save(model, \"pytorch_model_whole.bin\")"
   ]
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "49e5bd26eb5fb0a8ffb649f62262c127e261ba51230dc2578599ab5938abf7ca"
  },
  "kernelspec": {
   "display_name": "Python 3.8.0 64-bit ('torch17py38': conda)",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.0"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
