{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33mqftie\u001b[0m (use `wandb login --relogin` to force relogin)\n"
     ]
    }
   ],
   "source": [
    "!wandb login"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
     "import os\n",
     "\n",
     "# Restrict this notebook to physical GPUs 4 and 7.\n",
     "# Must be set before any CUDA-using library (torch) initializes a context.\n",
     "os.environ['CUDA_VISIBLE_DEVICES']='4,7'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Failed to detect the name of this notebook, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable to enable code saving.\n",
      "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33mqftie\u001b[0m (use `wandb login --relogin` to force relogin)\n",
      "2022-04-01 23:34:27.641526: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory\n",
      "2022-04-01 23:34:27.641580: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "Tracking run with wandb version 0.12.11"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "Run data is saved locally in <code>/home/phd-fan.weiquan2/works/MMMTD_Benchmark/BERT-like-method-with-CPED/wandb/run-20220401_233425-1f62d7lq</code>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "Syncing run <strong><a href=\"https://wandb.ai/qftie/cped-emo-cls/runs/1f62d7lq\" target=\"_blank\">distinctive-bird-129</a></strong> to <a href=\"https://wandb.ai/qftie/cped-emo-cls\" target=\"_blank\">Weights & Biases</a> (<a href=\"https://wandb.me/run\" target=\"_blank\">docs</a>)<br/>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
     "# Initialize the wandb run and the hyperparameter config for this experiment.\n",
     "import wandb\n",
     "\n",
     "wandb.init(project=\"cped-emo-cls\",entity='qftie',group='bert-cls3')\n",
     "\n",
     "config = wandb.config          # wandb config object; attributes set below are logged with the run\n",
     "config.batch_size = 16          # train/valid batch size\n",
     "config.test_batch_size = 64    # test batch size\n",
     "config.epochs = 5             # number of training epochs\n",
     "config.lr = 2e-5               # AdamW learning rate (used in the optimizer cell below)\n",
     "config.momentum = 0.1          # NOTE(review): not used anywhere in this notebook (optimizer is AdamW)\n",
     "config.no_cuda = False         # disables CUDA training\n",
     "config.bert_path = 'hfl/chinese-roberta-wwm-ext'  # HF checkpoint for tokenizer + encoder\n",
     "config.exam_name = 'roberta-forwardUt'            # experiment tag\n",
     "config.max_seq_len = 500       # character budget for forward context + target utterance\n",
     "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2022-04-01 23:34:34.416500: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory\n",
      "2022-04-01 23:34:34.416568: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.\n"
     ]
    }
   ],
   "source": [
     "import pandas as pd \n",
     "import numpy as np \n",
     "import json, time\n",
     "from tqdm import tqdm \n",
     "from sklearn.metrics import accuracy_score, classification_report, f1_score\n",
     "import torch\n",
     "from torch.utils.data import Dataset, DataLoader, TensorDataset \n",
     "import torch.nn as nn\n",
     "import torch.nn.functional as F\n",
     "import torch.optim as optim\n",
     "from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n",
     "from transformers import BertModel, BertConfig, AutoTokenizer, AdamW, get_cosine_schedule_with_warmup, AutoModelForSequenceClassification\n",
     "import warnings\n",
     "warnings.filterwarnings('ignore')\n",
     "\n",
     "bert_path = config.bert_path\n",
     "tokenizer = AutoTokenizer.from_pretrained(bert_path, truncation_side=\"left\")   # init tokenizer; left-side truncation keeps the most recent context tokens when over max_length"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# seed everything"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import random\n",
    "\n",
    "def set_seed(seed):\n",
    "    np.random.seed(seed)\n",
    "    random.seed(seed)\n",
    "    torch.manual_seed(seed)\n",
    "    if torch.cuda.is_available():\n",
    "        torch.cuda.manual_seed(seed)\n",
    "        torch.cuda.manual_seed_all(seed)\n",
    "    torch.backends.cudnn.benchmark = False\n",
    "    torch.backends.cudnn.deterministic = True\n",
    "\n",
    "set_seed(42)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 预处理数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # mmmtd版本\n",
    "# data_text = pd.read_csv('/148Dataset/data-tie.qianfeng/CPED/data_with_personality.csv')\n",
    "# data_text['tv'] = [int(s[:2]) for s in data_text['vvid']]\n",
    "# emo2id = {'中性情绪':0, '正向情绪':1, '负向情绪':2}\n",
    "# data_text['Emotion_3'] = [emo2id[x] for x in data_text['情绪(粗粒度)']]\n",
    "\n",
    "\n",
    "# # train_index = data_text['tv']<=26\n",
    "# # valid_index = (data_text['tv']==32) | (data_text['tv']>=39)\n",
    "# # test_index = ((data_text['tv']>=27) & (data_text['tv']<32)) | ((data_text['tv']>32) & (data_text['tv']<=37))\n",
    "\n",
    "# data_text_train = data_text[data_text['tv']<=26]\n",
    "# data_text_valid = data_text[(data_text['tv']==32) | (data_text['tv']>=39)]\n",
    "# data_text_test = data_text[((data_text['tv']>=27) & (data_text['tv']<32)) | ((data_text['tv']>32) & (data_text['tv']<=37))]\n",
    "\n",
    "# # data_text_train = data_text_train.sample(frac=1, random_state=2021)\n",
    "# # data_text_valid = data_text_valid.sample(frac=1, random_state=2021)\n",
    "# # data_text_test = data_text_test.sample(frac=1, random_state=2021)\n",
    "\n",
    "# x_train = data_text_train['说话内容'].tolist()\n",
    "# train_label = data_text_train['Emotion_3'].tolist()\n",
    "\n",
    "# x_valid = data_text_valid['说话内容'].tolist()\n",
    "# valid_label = data_text_valid['Emotion_3'].tolist()\n",
    "\n",
    "# x_test = data_text_test['说话内容'].tolist()\n",
    "# test_label = data_text_test['Emotion_3'].tolist()\n",
    "\n",
    "# train_encoding = tokenizer(x_train, truncation=True, padding=True, max_length=120)\n",
    "# # max_length对文本做截断\n",
    "# valid_encoding = tokenizer(x_valid, truncation=True, padding=True, max_length=120)\n",
    "# test_encoding = tokenizer(x_test, truncation=True, padding=True, max_length=120)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
     "# CPED dataset: 3-way sentiment classification (neutral / positive / negative).\n",
     "# data_text = pd.read_csv('/148Dataset/data-tie.qianfeng/CPED/data_with_personality.csv')\n",
     "# data_text['tv'] = [int(s[:2]) for s in data_text['vvid']]\n",
     "emo2id = {'neutral':0, 'positive':1, 'negative':2}\n",
     "# data_text['Emotion_3'] = [emo2id[x] for x in data_text['Sentiment']]\n",
     "\n",
     "# train_index = data_text['tv']<=26\n",
     "# valid_index = (data_text['tv']==32) | (data_text['tv']>=39)\n",
     "# test_index = ((data_text['tv']>=27) & (data_text['tv']<32)) | ((data_text['tv']>32) & (data_text['tv']<=37))\n",
     "\n",
     "# NOTE(review): absolute machine-specific paths; consider a DATA_DIR constant\n",
     "# if this notebook should run elsewhere.\n",
     "data_text_train = pd.read_csv('/148Dataset/Dataset/CPED/train_split.csv')\n",
     "data_text_valid = pd.read_csv('/148Dataset/Dataset/CPED/valid_split.csv')\n",
     "data_text_test = pd.read_csv('/148Dataset/Dataset/CPED/test_split.csv')\n",
     "\n",
     "# Prefix each utterance with its speaker name so the model sees who is talking.\n",
     "data_text_train['Speaker_Utterance'] = data_text_train['Speaker'] + ':' + data_text_train['Utterance']\n",
     "data_text_valid['Speaker_Utterance'] = data_text_valid['Speaker'] + ':' + data_text_valid['Utterance']\n",
     "data_text_test['Speaker_Utterance'] = data_text_test['Speaker'] + ':' + data_text_test['Utterance']\n",
     "\n",
     "# data_text_train = data_text_train.sample(frac=1, random_state=2021)\n",
     "# data_text_valid = data_text_valid.sample(frac=1, random_state=2021)\n",
     "# data_text_test = data_text_test.sample(frac=1, random_state=2021)\n",
     "\n",
     "x_train = (data_text_train['Speaker_Utterance']).tolist()\n",
     "train_label = [emo2id[x] for x in data_text_train['Sentiment']]\n",
     "\n",
     "x_valid = data_text_valid['Speaker_Utterance'].tolist()\n",
     "valid_label = [emo2id[x] for x in data_text_valid['Sentiment']]\n",
     "\n",
     "x_test = data_text_test['Speaker_Utterance'].tolist()\n",
     "test_label = [emo2id[x] for x in data_text_test['Sentiment']]\n",
     "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # CPED版本数据集，使用13分类映射后的7分类\n",
    "# # data_text = pd.read_csv('/148Dataset/data-tie.qianfeng/CPED/data_with_personality.csv')\n",
    "# # data_text['tv'] = [int(s[:2]) for s in data_text['vvid']]\n",
    "# emo2id = {'happy':0,'grateful':0, 'relaxed':0, 'positive-other':0, 'neutral':1, 'anger':2, 'sadness':3, 'depress':3, 'fear':4, 'worried':4, 'astonished':5, 'disgust':6, 'negative-other':6}\n",
    "# # data_text['Emotion_3'] = [emo2id[x] for x in data_text['Sentiment']]\n",
    "\n",
    "# # train_index = data_text['tv']<=26\n",
    "# # valid_index = (data_text['tv']==32) | (data_text['tv']>=39)\n",
    "# # test_index = ((data_text['tv']>=27) & (data_text['tv']<32)) | ((data_text['tv']>32) & (data_text['tv']<=37))\n",
    "\n",
    "# data_text_train = pd.read_csv('/148Dataset/Dataset/CPED/train_split.csv')\n",
    "# data_text_valid = pd.read_csv('/148Dataset/Dataset/CPED/valid_split.csv')\n",
    "# data_text_test = pd.read_csv('/148Dataset/Dataset/CPED/test_split.csv')\n",
    "\n",
    "# # data_text_train = data_text_train.sample(frac=1, random_state=2021)\n",
    "# # data_text_valid = data_text_valid.sample(frac=1, random_state=2021)\n",
    "# # data_text_test = data_text_test.sample(frac=1, random_state=2021)\n",
    "\n",
    "# x_train = data_text_train['Utterance'].tolist()\n",
    "# train_label = [emo2id[x] for x in data_text_train['Emotion']]\n",
    "\n",
    "# x_valid = data_text_valid['Utterance'].tolist()\n",
    "# valid_label = [emo2id[x] for x in data_text_valid['Emotion']]\n",
    "\n",
    "# x_test = data_text_test['Utterance'].tolist()\n",
    "# test_label = [emo2id[x] for x in data_text_test['Emotion']]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 准备forward utterance作为sentence"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# back_stride = int(data_text_valid['Utterance_ID'][50][-3:])\n",
    "# ' '.join(data_text_valid['Utterance'][ 50- back_stride: 50].tolist())\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>TV_ID</th>\n",
       "      <th>Dialogue_ID</th>\n",
       "      <th>Utterance_ID</th>\n",
       "      <th>Speaker</th>\n",
       "      <th>Gender</th>\n",
       "      <th>Age</th>\n",
       "      <th>Neuroticism</th>\n",
       "      <th>Extraversion</th>\n",
       "      <th>Openness</th>\n",
       "      <th>Agreeableness</th>\n",
       "      <th>Conscientiousness</th>\n",
       "      <th>Scene</th>\n",
       "      <th>FacePosition_LU</th>\n",
       "      <th>FacePosition_RD</th>\n",
       "      <th>Sentiment</th>\n",
       "      <th>Emotion</th>\n",
       "      <th>DA</th>\n",
       "      <th>Utterance</th>\n",
       "      <th>Speaker_Utterance</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>1</td>\n",
       "      <td>01_000</td>\n",
       "      <td>01_000_000</td>\n",
       "      <td>童文洁</td>\n",
       "      <td>female</td>\n",
       "      <td>middle-aged</td>\n",
       "      <td>high</td>\n",
       "      <td>high</td>\n",
       "      <td>low</td>\n",
       "      <td>low</td>\n",
       "      <td>high</td>\n",
       "      <td>other-venue</td>\n",
       "      <td>108_136</td>\n",
       "      <td>156_202</td>\n",
       "      <td>neutral</td>\n",
       "      <td>neutral</td>\n",
       "      <td>greeting</td>\n",
       "      <td>真巧</td>\n",
       "      <td>童文洁:真巧</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>1</td>\n",
       "      <td>01_000</td>\n",
       "      <td>01_000_001</td>\n",
       "      <td>童文洁</td>\n",
       "      <td>female</td>\n",
       "      <td>middle-aged</td>\n",
       "      <td>high</td>\n",
       "      <td>high</td>\n",
       "      <td>low</td>\n",
       "      <td>low</td>\n",
       "      <td>high</td>\n",
       "      <td>other-venue</td>\n",
       "      <td>193_144</td>\n",
       "      <td>253_197</td>\n",
       "      <td>neutral</td>\n",
       "      <td>neutral</td>\n",
       "      <td>greeting</td>\n",
       "      <td>车没事了</td>\n",
       "      <td>童文洁:车没事了</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>1</td>\n",
       "      <td>01_000</td>\n",
       "      <td>01_000_002</td>\n",
       "      <td>刘静</td>\n",
       "      <td>female</td>\n",
       "      <td>middle-aged</td>\n",
       "      <td>low</td>\n",
       "      <td>high</td>\n",
       "      <td>high</td>\n",
       "      <td>high</td>\n",
       "      <td>high</td>\n",
       "      <td>other-venue</td>\n",
       "      <td>0_0</td>\n",
       "      <td>0_0</td>\n",
       "      <td>neutral</td>\n",
       "      <td>neutral</td>\n",
       "      <td>answer</td>\n",
       "      <td>是你呀 没事了没事 谢谢你</td>\n",
       "      <td>刘静:是你呀 没事了没事 谢谢你</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>1</td>\n",
       "      <td>01_000</td>\n",
       "      <td>01_000_003</td>\n",
       "      <td>童文洁</td>\n",
       "      <td>female</td>\n",
       "      <td>middle-aged</td>\n",
       "      <td>high</td>\n",
       "      <td>high</td>\n",
       "      <td>low</td>\n",
       "      <td>low</td>\n",
       "      <td>high</td>\n",
       "      <td>other-venue</td>\n",
       "      <td>0_0</td>\n",
       "      <td>0_0</td>\n",
       "      <td>neutral</td>\n",
       "      <td>neutral</td>\n",
       "      <td>question</td>\n",
       "      <td>没事没事 你也去春风</td>\n",
       "      <td>童文洁:没事没事 你也去春风</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>1</td>\n",
       "      <td>01_001</td>\n",
       "      <td>01_001_000</td>\n",
       "      <td>刘静</td>\n",
       "      <td>female</td>\n",
       "      <td>middle-aged</td>\n",
       "      <td>low</td>\n",
       "      <td>high</td>\n",
       "      <td>high</td>\n",
       "      <td>high</td>\n",
       "      <td>high</td>\n",
       "      <td>other-venue</td>\n",
       "      <td>0_0</td>\n",
       "      <td>0_0</td>\n",
       "      <td>neutral</td>\n",
       "      <td>neutral</td>\n",
       "      <td>answer</td>\n",
       "      <td>对</td>\n",
       "      <td>刘静:对</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   TV_ID Dialogue_ID Utterance_ID Speaker  Gender          Age Neuroticism  \\\n",
       "0      1      01_000   01_000_000     童文洁  female  middle-aged        high   \n",
       "1      1      01_000   01_000_001     童文洁  female  middle-aged        high   \n",
       "2      1      01_000   01_000_002      刘静  female  middle-aged         low   \n",
       "3      1      01_000   01_000_003     童文洁  female  middle-aged        high   \n",
       "4      1      01_001   01_001_000      刘静  female  middle-aged         low   \n",
       "\n",
       "  Extraversion Openness Agreeableness Conscientiousness        Scene  \\\n",
       "0         high      low           low              high  other-venue   \n",
       "1         high      low           low              high  other-venue   \n",
       "2         high     high          high              high  other-venue   \n",
       "3         high      low           low              high  other-venue   \n",
       "4         high     high          high              high  other-venue   \n",
       "\n",
       "  FacePosition_LU FacePosition_RD Sentiment  Emotion        DA      Utterance  \\\n",
       "0         108_136         156_202   neutral  neutral  greeting             真巧   \n",
       "1         193_144         253_197   neutral  neutral  greeting           车没事了   \n",
       "2             0_0             0_0   neutral  neutral    answer  是你呀 没事了没事 谢谢你   \n",
       "3             0_0             0_0   neutral  neutral  question     没事没事 你也去春风   \n",
       "4             0_0             0_0   neutral  neutral    answer              对   \n",
       "\n",
       "  Speaker_Utterance  \n",
       "0            童文洁:真巧  \n",
       "1          童文洁:车没事了  \n",
       "2  刘静:是你呀 没事了没事 谢谢你  \n",
       "3    童文洁:没事没事 你也去春风  \n",
       "4              刘静:对  "
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "data_text_train.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "max_seq_len = config.max_seq_len\n",
    "\n",
    "utt_forward_train = []\n",
    "for i in range(len(data_text_train)):\n",
    "    back_stride = int(data_text_train['Utterance_ID'][i][-3:])\n",
    "    utt_forward = '.'.join(data_text_train['Speaker_Utterance'][ i- back_stride: i].tolist())\n",
    "    max_forward_len = max_seq_len - len(data_text_train['Utterance'][i])\n",
    "    utt_forward = utt_forward[-max_forward_len:]\n",
    "    utt_forward_train.append(utt_forward)\n",
    "\n",
    "utt_forward_valid = []\n",
    "for i in range(len(data_text_valid)):\n",
    "    back_stride = int(data_text_valid['Utterance_ID'][i][-3:])\n",
    "    utt_forward = '.'.join(data_text_valid['Speaker_Utterance'][ i- back_stride: i].tolist())\n",
    "    max_forward_len = max_seq_len - len(data_text_valid['Utterance'][i])\n",
    "    utt_forward = utt_forward[-max_forward_len:]\n",
    "    utt_forward_valid.append(utt_forward)\n",
    "\n",
    "utt_forward_test = []\n",
    "for i in range(len(data_text_test)):\n",
    "    back_stride = int(data_text_test['Utterance_ID'][i][-3:])\n",
    "    utt_forward = '.'.join(data_text_test['Speaker_Utterance'][ i- back_stride: i].tolist())\n",
    "    max_forward_len = max_seq_len - len(data_text_test['Utterance'][i])\n",
    "    utt_forward = utt_forward[-max_forward_len:]    \n",
    "    utt_forward_test.append(utt_forward)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['童文洁:真巧',\n",
       " '童文洁:车没事了',\n",
       " '刘静:是你呀 没事了没事 谢谢你',\n",
       " '童文洁:没事没事 你也去春风',\n",
       " '刘静:对',\n",
       " '童文洁:你孩子也在这上学',\n",
       " '刘静:对呀',\n",
       " '童文洁:真巧 我儿子也在这',\n",
       " '童文洁:你孩子几年级',\n",
       " '刘静:高三了']"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x_train[:10]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['',\n",
       " '童文洁:真巧',\n",
       " '童文洁:真巧.童文洁:车没事了',\n",
       " '童文洁:真巧.童文洁:车没事了.刘静:是你呀 没事了没事 谢谢你',\n",
       " '',\n",
       " '刘静:对',\n",
       " '刘静:对.童文洁:你孩子也在这上学',\n",
       " '',\n",
       " '童文洁:真巧 我儿子也在这',\n",
       " '童文洁:真巧 我儿子也在这.童文洁:你孩子几年级']"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "utt_forward_train[:10]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "# print(x_valid_forward_utt)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## tokenize"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "# for i in range(len(x_train)):\n",
    "#     x_train[i] = utt_forward_train[i] + '[SEP]' + x_train[i]\n",
    "\n",
    "# for i in range(len(x_valid)):\n",
    "#     x_valid[i] = utt_forward_valid[i]+ '[SEP]' + x_valid[i] \n",
    "\n",
    "# for i in range(len(x_test)):\n",
    "#     x_test[i] = utt_forward_test[i] + '[SEP]' + x_test[i]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "158"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "max(map(len,x_train))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "# tok = tokenizer(['分词开始','下一句话开始'],['分词完毕','下一句话完毕'],padding=True)\n",
    "# print(tok)\n",
    "# torch.mul(torch.tensor(tok['token_type_ids']).unsqueeze(2) , torch.ones((2,15,3)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
     "train_encoding = tokenizer(utt_forward_train, x_train, truncation=True, padding=True, max_length=512)\n",
     "# Sentence-pair encoding: segment A = forward context, segment B = target utterance.\n",
     "# max_length truncates to BERT's 512-token limit; with truncation_side='left'\n",
     "# (set at tokenizer init) the oldest context tokens are dropped first.\n",
     "valid_encoding = tokenizer(utt_forward_valid, x_valid, truncation=True, padding=True, max_length=512)\n",
     "test_encoding = tokenizer(utt_forward_test, x_test, truncation=True, padding=True, max_length=512)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 准备训练集，验证集，测试集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Wrap tokenizer encodings + integer labels as a torch Dataset yielding dicts.\n",
     "class NewsDataset(Dataset):\n",
     "    def __init__(self, encodings, labels):\n",
     "        # encodings: tokenizer output (dict of per-sample lists); labels: list of ints\n",
     "        self.encodings = encodings\n",
     "        self.labels = labels\n",
     "    \n",
     "    # Return one sample: every encoding field as a tensor (input_ids,\n",
     "    # attention_mask, token_type_ids) plus its label under 'labels'.\n",
     "    def __getitem__(self, idx):\n",
     "        item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}\n",
     "        item['labels'] = torch.tensor(int(self.labels[idx]))\n",
     "        return item\n",
     "    \n",
     "    def __len__(self):\n",
     "        return len(self.labels)\n",
     "\n",
     "train_dataset = NewsDataset(train_encoding, train_label)\n",
     "valid_dataset = NewsDataset(valid_encoding, valid_label)\n",
     "test_dataset = NewsDataset(test_encoding, test_label)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## WeightedRandomSampler"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "# labels = [train_dataset[i]['labels'] for i in range(len(train_dataset))]\n",
    "# # labels[20:30]\n",
    "# # train_label[20:30]\n",
    "# weights = [1 if label == 2 else 2 for label in labels]\n",
    "# # len(train_dataset)\n",
    "# # weights[20:30]\n",
    "# from torch.utils.data.sampler import  WeightedRandomSampler\n",
    "# sampler = WeightedRandomSampler(weights,\\\n",
    "#                                 num_samples=70000,\\\n",
    "#                                 replacement=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 加载到torch的dataloader"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Batch the datasets for training/evaluation.\n",
     "train_loader = DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True)\n",
     "\n",
     "# train_loader = DataLoader(train_dataset, batch_size=config.batch_size, sampler=sampler)\n",
     "valid_loader = DataLoader(valid_dataset, batch_size=config.batch_size, shuffle=True)\n",
     "test_loader = DataLoader(test_dataset, batch_size=config.test_batch_size, shuffle=True)\n",
     "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 定义bert模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
     "\n",
     "# BERT encoder + small MLP head; classifies the target utterance (segment B)\n",
     "# by mean-pooling its token hidden states.\n",
     "class Bert_Model(nn.Module):\n",
     "    def __init__(self, bert_path, classes=3):\n",
     "        super(Bert_Model, self).__init__()\n",
     "        self.config = BertConfig.from_pretrained(bert_path)  # model hyperparameters\n",
     "        self.bert = BertModel.from_pretrained(bert_path, )     # pretrained encoder weights\n",
     "        self.fc = nn.Linear(self.config.hidden_size, classes)  # NOTE(review): unused in forward()\n",
     "        self.dense = nn.Linear(self.config.hidden_size, self.config.hidden_size)  # NOTE(review): unused in forward()\n",
     "        self.activation = nn.Tanh()  # NOTE(review): unused in forward()\n",
     "        self.pred = nn.Sequential(\n",
     "            nn.Linear(self.config.hidden_size, 64),\n",
     "            nn.Dropout(0.1),\n",
     "            nn.Linear(64, classes)\n",
     "        )\n",
     "    \n",
     "    def target_hidden_mean(self, token_type_ids, all_hidden_state):\n",
     "        \"\"\"Mean-pool hidden states over positions where token_type_id != 0,\n",
     "        i.e. the second (target-utterance) segment of the pair encoding.\n",
     "\n",
     "        token_type_ids: [bs, seq_len]; all_hidden_state: [bs, seq_len, hidden].\n",
     "        NOTE(review): divides by the per-example count of nonzero ids, so an\n",
     "        example with no segment-B tokens would divide by zero — confirm the\n",
     "        tokenizer always produces a non-empty second segment.\n",
     "        \"\"\"\n",
     "        ave_num = torch.count_nonzero(token_type_ids, dim=1) # [bs]\n",
     "        target_hidden_states = torch.mul(token_type_ids.unsqueeze(2),all_hidden_state) # [bs, seq_len, config.hidden_size]\n",
     "        out = torch.div(torch.sum(target_hidden_states, 1), ave_num.unsqueeze(1))\n",
     "        \n",
     "        return out\n",
     "\n",
     "        \n",
     "    def forward(self, input_ids, attention_mask=None, token_type_ids=None):\n",
     "        # token_type_ids [bs, sequence_length]\n",
     "        outputs = self.bert(input_ids, attention_mask, output_hidden_states= True, return_dict=True)\n",
     "        all_hidden_state = outputs.last_hidden_state\n",
     "        # target_hidden_states = torch.mul(token_type_ids.unsqueeze(2),outputs.last_hidden_state) # [bs, seq_len, config.hidden_size]\n",
     "\n",
     "        out_pool = self.target_hidden_mean(token_type_ids, all_hidden_state) #  [bs, config.hidden_size]\n",
     "\n",
     "        logit = self.pred(out_pool)   #  [bs, classes]\n",
     "        return logit"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 实例化bert模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of the model checkpoint at hfl/chinese-roberta-wwm-ext were not used when initializing BertModel: ['cls.seq_relationship.bias', 'cls.predictions.transform.dense.bias', 'cls.predictions.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.dense.weight', 'cls.predictions.decoder.weight', 'cls.seq_relationship.weight', 'cls.predictions.transform.LayerNorm.bias']\n",
      "- This IS expected if you are initializing BertModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing BertModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Total parameters: 102909958, Trainable parameters: 102909958\n"
     ]
    }
   ],
   "source": [
    "def get_parameter_number(model):\n",
    "    #  打印模型参数量\n",
    "    total_num = sum(p.numel() for p in model.parameters())\n",
    "    trainable_num = sum(p.numel() for p in model.parameters() if p.requires_grad)\n",
    "    return 'Total parameters: {}, Trainable parameters: {}'.format(total_num, trainable_num)\n",
    "\n",
    "DEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "EPOCHS = config.epochs\n",
    "model = Bert_Model(bert_path)\n",
    "model = nn.DataParallel(model)\n",
    "model = model.cuda()\n",
    "print(get_parameter_number(model))\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 优化器定义"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
     "optimizer = AdamW(model.parameters(), lr=config.lr, weight_decay=1e-4) # AdamW optimizer\n",
     "scheduler = get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=0.3*len(train_loader),\n",
     "                                            num_training_steps=EPOCHS*len(train_loader))\n",
     "# LR warms up linearly over the first 0.3 epoch (NOT a full epoch — the old\n",
     "# comment was stale), then follows a cosine decay to 0 over EPOCHS epochs.\n",
     "# Warmup matters: starting at the full LR can prevent convergence."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # 准备放入多卡环境\n",
    "# model, optimizer, train_loader = accelerator.prepare(model, optimizer, train_loader)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 定义训练函数和验证测试函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Evaluate model performance on the validation set.\n",
    "def evaluate(model, data_loader, device):\n",
    "    \"\"\"Run the model over data_loader; return (accuracy, mean loss, macro-F1).\"\"\"\n",
    "    model.eval()\n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    val_true, val_pred = [], []\n",
    "    valid_loss_sum = 0.0\n",
    "    with torch.no_grad():\n",
    "        for idx, batch in enumerate(data_loader):\n",
    "            y_pred = model(batch['input_ids'].to(device), batch['attention_mask'].to(device), batch['token_type_ids'].to(device))\n",
    "            loss = criterion(y_pred, batch['labels'].to(device))\n",
    "            y_pred = torch.argmax(y_pred, dim=1).detach().cpu().numpy().tolist()\n",
    "            val_pred.extend(y_pred)\n",
    "            val_true.extend(batch['labels'].cpu().numpy().tolist())\n",
    "            valid_loss_sum += loss.item()\n",
    "\n",
    "    print(classification_report(val_true, val_pred, digits=4))\n",
    "    return accuracy_score(val_true, val_pred), valid_loss_sum/len(data_loader), f1_score(val_true, val_pred, average='macro')  # (accuracy, loss, f1-macro)\n",
    "\n",
    "\n",
    "# The test set has no labels; predict class indices for submission.\n",
    "def predict(model, data_loader, device):\n",
    "    \"\"\"Return predicted class indices for every sample in data_loader.\"\"\"\n",
    "    model.eval()\n",
    "    val_pred = []\n",
    "    with torch.no_grad():\n",
    "        for batch in data_loader:\n",
    "            y_pred = model(batch['input_ids'].to(device), batch['attention_mask'].to(device), batch['token_type_ids'].to(device))\n",
    "            y_pred = torch.argmax(y_pred, dim=1).detach().cpu().numpy().tolist()\n",
    "            val_pred.extend(y_pred)\n",
    "    return val_pred\n",
    "\n",
    "\n",
    "def train_and_eval(model, train_loader, valid_loader, \n",
    "                   optimizer, scheduler, device, epoch):\n",
    "    \"\"\"Train for `epoch` epochs, validating after each one and checkpointing on best macro-F1.\"\"\"\n",
    "    best_acc = 0.0\n",
    "    patience = 0\n",
    "    best_loss = 100\n",
    "    best_macro_f1 = 0\n",
    "    b = 0.6  # flood level for the \"flooding\" regularizer (Ishida et al., 2020)\n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    # Log/print roughly 20 times per epoch; max(1, ...) prevents a\n",
    "    # ZeroDivisionError when the loader has fewer than 20 batches.\n",
    "    log_interval = max(1, len(train_loader) // 20)\n",
    "    for i in range(epoch):\n",
    "        \"\"\"Training phase\"\"\"\n",
    "        start = time.time()\n",
    "        model.train()\n",
    "        print(\"***** Running training epoch {} *****\".format(i+1))\n",
    "        train_loss_sum = 0.0\n",
    "        for idx, batch in enumerate(train_loader):\n",
    "            ids = batch['input_ids'].to(device)\n",
    "            att = batch['attention_mask'].to(device)\n",
    "            tpe = batch['token_type_ids'].to(device)\n",
    "            y = batch['labels'].to(device)  \n",
    "            y_pred = model(ids, att, tpe)\n",
    "            loss = criterion(y_pred, y)\n",
    "            loss = (loss - b).abs() + b  # flooding: keep the training loss hovering around b\n",
    "            step_lr = np.array([param_group[\"lr\"] for param_group in optimizer.param_groups]).mean()\n",
    "            optimizer.zero_grad()\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "            scheduler.step()   # advance the LR schedule\n",
    "            \n",
    "            train_loss_sum += loss.item()\n",
    "            if (idx + 1) % log_interval == 0:    # periodic progress log (~20x per epoch)\n",
    "                wandb.log({\n",
    "                            'Epoch': i+1, \n",
    "                            'train_loss': loss.item(),  # log a plain scalar, not a live tensor\n",
    "                            'lr': step_lr\n",
    "                            })\n",
    "                print(\"Epoch {:04d} | Step {:04d}/{:04d} | Loss {:.4f} | Time {:.4f}\".format(\n",
    "                          i+1, idx+1, len(train_loader), train_loss_sum/(idx+1), time.time() - start))\n",
    "                # print(\"Learning rate = {}\".format(optimizer.state_dict()['param_groups'][0]['lr']))\n",
    "\n",
    "        \"\"\"Validation phase\"\"\"\n",
    "        model.eval()\n",
    "        acc, valid_loss, valid_macro_f1 = evaluate(model, valid_loader, device)  # validation metrics\n",
    "        wandb.log({'valid_acc': acc, 'valid_loss': valid_loss})\n",
    "        # Track the best validation loss seen so far.\n",
    "        if valid_loss < best_loss:\n",
    "            best_loss = valid_loss\n",
    "            \n",
    "\n",
    "        # Checkpoint on best validation macro-F1.\n",
    "        if valid_macro_f1 > best_macro_f1:\n",
    "            best_macro_f1 = valid_macro_f1\n",
    "            torch.save(model.state_dict(), 'output/ch-roberta-dorwardUt/pytorch_model.bin') \n",
    "            \n",
    "        \n",
    "        print(\"current macro_f1 is {:.4f}, best macro_f1 is {:.4f}\".format(valid_macro_f1, best_macro_f1))\n",
    "        print(\"time costed = {}s \\n\".format(round(time.time() - start, 5)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 训练和验证模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "***** Running training epoch 1 *****\n",
      "Epoch 0001 | Step 0294/5887 | Loss 1.0339 | Time 157.7950\n",
      "Epoch 0001 | Step 0588/5887 | Loss 0.9956 | Time 311.7070\n",
      "Epoch 0001 | Step 0882/5887 | Loss 0.9642 | Time 467.3826\n",
      "Epoch 0001 | Step 1176/5887 | Loss 0.9406 | Time 616.1076\n",
      "Epoch 0001 | Step 1470/5887 | Loss 0.9236 | Time 768.4292\n",
      "Epoch 0001 | Step 1764/5887 | Loss 0.9099 | Time 920.5655\n",
      "Epoch 0001 | Step 2058/5887 | Loss 0.9005 | Time 1073.9852\n",
      "Epoch 0001 | Step 2352/5887 | Loss 0.8904 | Time 1225.1916\n",
      "Epoch 0001 | Step 2646/5887 | Loss 0.8816 | Time 1373.1735\n",
      "Epoch 0001 | Step 2940/5887 | Loss 0.8730 | Time 1525.4600\n",
      "Epoch 0001 | Step 3234/5887 | Loss 0.8649 | Time 1678.4359\n",
      "Epoch 0001 | Step 3528/5887 | Loss 0.8567 | Time 1828.1875\n",
      "Epoch 0001 | Step 3822/5887 | Loss 0.8500 | Time 1976.8303\n",
      "Epoch 0001 | Step 4116/5887 | Loss 0.8435 | Time 2129.6733\n",
      "Epoch 0001 | Step 4410/5887 | Loss 0.8376 | Time 2282.9932\n",
      "Epoch 0001 | Step 4704/5887 | Loss 0.8326 | Time 2431.7389\n",
      "Epoch 0001 | Step 4998/5887 | Loss 0.8278 | Time 2579.8878\n",
      "Epoch 0001 | Step 5292/5887 | Loss 0.8233 | Time 2731.0989\n",
      "Epoch 0001 | Step 5586/5887 | Loss 0.8194 | Time 2881.5635\n",
      "Epoch 0001 | Step 5880/5887 | Loss 0.8151 | Time 3013.8345\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0     0.4257    0.4882    0.4548      3126\n",
      "           1     0.3897    0.3121    0.3466      1692\n",
      "           2     0.7218    0.7079    0.7148      6319\n",
      "\n",
      "    accuracy                         0.5861     11137\n",
      "   macro avg     0.5124    0.5027    0.5054     11137\n",
      "weighted avg     0.5882    0.5861    0.5859     11137\n",
      "\n",
      "current macro_f1 is 0.5054, best macro_f1 is 0.5054\n",
      "time costed = 3121.57669s \n",
      "\n",
      "***** Running training epoch 2 *****\n",
      "Epoch 0002 | Step 0294/5887 | Loss 0.7078 | Time 127.6046\n",
      "Epoch 0002 | Step 0588/5887 | Loss 0.7100 | Time 254.6782\n",
      "Epoch 0002 | Step 0882/5887 | Loss 0.7076 | Time 384.1192\n",
      "Epoch 0002 | Step 1176/5887 | Loss 0.7081 | Time 509.3949\n",
      "Epoch 0002 | Step 1470/5887 | Loss 0.7092 | Time 632.3871\n",
      "Epoch 0002 | Step 1764/5887 | Loss 0.7086 | Time 750.3264\n",
      "Epoch 0002 | Step 2058/5887 | Loss 0.7088 | Time 869.9465\n",
      "Epoch 0002 | Step 2352/5887 | Loss 0.7087 | Time 987.8604\n",
      "Epoch 0002 | Step 2646/5887 | Loss 0.7085 | Time 1107.4010\n",
      "Epoch 0002 | Step 2940/5887 | Loss 0.7084 | Time 1226.9252\n",
      "Epoch 0002 | Step 3234/5887 | Loss 0.7084 | Time 1344.7069\n",
      "Epoch 0002 | Step 3528/5887 | Loss 0.7082 | Time 1464.1492\n",
      "Epoch 0002 | Step 3822/5887 | Loss 0.7082 | Time 1583.5907\n",
      "Epoch 0002 | Step 4116/5887 | Loss 0.7081 | Time 1701.3985\n",
      "Epoch 0002 | Step 4410/5887 | Loss 0.7076 | Time 1820.8381\n",
      "Epoch 0002 | Step 4704/5887 | Loss 0.7070 | Time 1938.6043\n",
      "Epoch 0002 | Step 4998/5887 | Loss 0.7063 | Time 2058.2223\n",
      "Epoch 0002 | Step 5292/5887 | Loss 0.7058 | Time 2177.8817\n",
      "Epoch 0002 | Step 5586/5887 | Loss 0.7055 | Time 2297.3598\n",
      "Epoch 0002 | Step 5880/5887 | Loss 0.7053 | Time 2415.0226\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0     0.4561    0.4283    0.4418      3126\n",
      "           1     0.3700    0.2926    0.3267      1692\n",
      "           2     0.7070    0.7678    0.7362      6319\n",
      "\n",
      "    accuracy                         0.6003     11137\n",
      "   macro avg     0.5110    0.4962    0.5016     11137\n",
      "weighted avg     0.5853    0.6003    0.5913     11137\n",
      "\n",
      "current macro_f1 is 0.5016, best macro_f1 is 0.5054\n",
      "time costed = 2511.49673s \n",
      "\n",
      "***** Running training epoch 3 *****\n",
      "Epoch 0003 | Step 0294/5887 | Loss 0.6850 | Time 119.5797\n",
      "Epoch 0003 | Step 0588/5887 | Loss 0.6842 | Time 239.0664\n",
      "Epoch 0003 | Step 0882/5887 | Loss 0.6833 | Time 356.7869\n",
      "Epoch 0003 | Step 1176/5887 | Loss 0.6833 | Time 474.4565\n",
      "Epoch 0003 | Step 1470/5887 | Loss 0.6838 | Time 593.9352\n",
      "Epoch 0003 | Step 1764/5887 | Loss 0.6838 | Time 713.5036\n",
      "Epoch 0003 | Step 2058/5887 | Loss 0.6836 | Time 833.1256\n",
      "Epoch 0003 | Step 2352/5887 | Loss 0.6832 | Time 952.5518\n",
      "Epoch 0003 | Step 2646/5887 | Loss 0.6833 | Time 1072.1181\n",
      "Epoch 0003 | Step 2940/5887 | Loss 0.6838 | Time 1191.6278\n",
      "Epoch 0003 | Step 3234/5887 | Loss 0.6837 | Time 1309.4364\n",
      "Epoch 0003 | Step 3528/5887 | Loss 0.6835 | Time 1428.9772\n",
      "Epoch 0003 | Step 3822/5887 | Loss 0.6832 | Time 1548.5077\n",
      "Epoch 0003 | Step 4116/5887 | Loss 0.6827 | Time 1666.2077\n",
      "Epoch 0003 | Step 4410/5887 | Loss 0.6824 | Time 1785.6462\n",
      "Epoch 0003 | Step 4704/5887 | Loss 0.6826 | Time 1903.3605\n",
      "Epoch 0003 | Step 4998/5887 | Loss 0.6822 | Time 2022.9033\n",
      "Epoch 0003 | Step 5292/5887 | Loss 0.6818 | Time 2140.5800\n",
      "Epoch 0003 | Step 5586/5887 | Loss 0.6816 | Time 2260.1039\n",
      "Epoch 0003 | Step 5880/5887 | Loss 0.6812 | Time 2377.7670\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0     0.3874    0.5752    0.4630      3126\n",
      "           1     0.4063    0.2128    0.2793      1692\n",
      "           2     0.7253    0.6439    0.6822      6319\n",
      "\n",
      "    accuracy                         0.5591     11137\n",
      "   macro avg     0.5063    0.4773    0.4748     11137\n",
      "weighted avg     0.5820    0.5591    0.5595     11137\n",
      "\n",
      "current macro_f1 is 0.4748, best macro_f1 is 0.5054\n",
      "time costed = 2474.22307s \n",
      "\n",
      "***** Running training epoch 4 *****\n",
      "Epoch 0004 | Step 0294/5887 | Loss 0.6730 | Time 119.5580\n",
      "Epoch 0004 | Step 0588/5887 | Loss 0.6717 | Time 239.0519\n",
      "Epoch 0004 | Step 0882/5887 | Loss 0.6717 | Time 356.8700\n",
      "Epoch 0004 | Step 1176/5887 | Loss 0.6691 | Time 474.6335\n",
      "Epoch 0004 | Step 1470/5887 | Loss 0.6684 | Time 594.1469\n",
      "Epoch 0004 | Step 1764/5887 | Loss 0.6684 | Time 711.8046\n",
      "Epoch 0004 | Step 2058/5887 | Loss 0.6681 | Time 831.5349\n",
      "Epoch 0004 | Step 2352/5887 | Loss 0.6670 | Time 951.0699\n",
      "Epoch 0004 | Step 2646/5887 | Loss 0.6671 | Time 1070.5749\n",
      "Epoch 0004 | Step 2940/5887 | Loss 0.6668 | Time 1190.1629\n",
      "Epoch 0004 | Step 3234/5887 | Loss 0.6666 | Time 1307.8782\n",
      "Epoch 0004 | Step 3528/5887 | Loss 0.6661 | Time 1427.3617\n",
      "Epoch 0004 | Step 3822/5887 | Loss 0.6656 | Time 1545.0854\n",
      "Epoch 0004 | Step 4116/5887 | Loss 0.6655 | Time 1664.5949\n",
      "Epoch 0004 | Step 4410/5887 | Loss 0.6649 | Time 1782.2108\n",
      "Epoch 0004 | Step 4704/5887 | Loss 0.6649 | Time 1901.4827\n",
      "Epoch 0004 | Step 4998/5887 | Loss 0.6652 | Time 2020.9692\n",
      "Epoch 0004 | Step 5292/5887 | Loss 0.6647 | Time 2138.6452\n",
      "Epoch 0004 | Step 5586/5887 | Loss 0.6646 | Time 2256.2485\n",
      "Epoch 0004 | Step 5880/5887 | Loss 0.6644 | Time 2375.6786\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0     0.4088    0.5448    0.4671      3126\n",
      "           1     0.3769    0.2778    0.3198      1692\n",
      "           2     0.7331    0.6640    0.6968      6319\n",
      "\n",
      "    accuracy                         0.5719     11137\n",
      "   macro avg     0.5062    0.4955    0.4946     11137\n",
      "weighted avg     0.5879    0.5719    0.5751     11137\n",
      "\n",
      "current macro_f1 is 0.4946, best macro_f1 is 0.5054\n",
      "time costed = 2473.72335s \n",
      "\n",
      "***** Running training epoch 5 *****\n",
      "Epoch 0005 | Step 0294/5887 | Loss 0.6555 | Time 119.5259\n",
      "Epoch 0005 | Step 0588/5887 | Loss 0.6540 | Time 240.6831\n",
      "Epoch 0005 | Step 0882/5887 | Loss 0.6547 | Time 358.4138\n",
      "Epoch 0005 | Step 1176/5887 | Loss 0.6560 | Time 477.7710\n",
      "Epoch 0005 | Step 1470/5887 | Loss 0.6552 | Time 595.3956\n",
      "Epoch 0005 | Step 1764/5887 | Loss 0.6555 | Time 713.1150\n",
      "Epoch 0005 | Step 2058/5887 | Loss 0.6550 | Time 832.5233\n",
      "Epoch 0005 | Step 2352/5887 | Loss 0.6543 | Time 952.0434\n",
      "Epoch 0005 | Step 2646/5887 | Loss 0.6542 | Time 1069.6881\n",
      "Epoch 0005 | Step 2940/5887 | Loss 0.6541 | Time 1189.1566\n",
      "Epoch 0005 | Step 3234/5887 | Loss 0.6541 | Time 1306.8868\n",
      "Epoch 0005 | Step 3528/5887 | Loss 0.6541 | Time 1424.5090\n",
      "Epoch 0005 | Step 3822/5887 | Loss 0.6542 | Time 1543.9469\n",
      "Epoch 0005 | Step 4116/5887 | Loss 0.6544 | Time 1661.6829\n",
      "Epoch 0005 | Step 4410/5887 | Loss 0.6544 | Time 1781.0658\n",
      "Epoch 0005 | Step 4704/5887 | Loss 0.6543 | Time 1898.8364\n",
      "Epoch 0005 | Step 4998/5887 | Loss 0.6541 | Time 2018.3800\n",
      "Epoch 0005 | Step 5292/5887 | Loss 0.6541 | Time 2137.8353\n",
      "Epoch 0005 | Step 5586/5887 | Loss 0.6540 | Time 2255.5392\n",
      "Epoch 0005 | Step 5880/5887 | Loss 0.6539 | Time 2373.2159\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0     0.4123    0.5441    0.4691      3126\n",
      "           1     0.3889    0.2648    0.3150      1692\n",
      "           2     0.7288    0.6757    0.7013      6319\n",
      "\n",
      "    accuracy                         0.5764     11137\n",
      "   macro avg     0.5100    0.4949    0.4951     11137\n",
      "weighted avg     0.5883    0.5764    0.5774     11137\n",
      "\n",
      "current macro_f1 is 0.4951, best macro_f1 is 0.5054\n",
      "time costed = 2471.42809s \n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Train and evaluate on the validation set after each epoch\n",
    "train_and_eval(model, train_loader, valid_loader, optimizer, scheduler, DEVICE, EPOCHS)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 加载最优模型测试"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0     0.3840    0.3689    0.3763      7991\n",
      "           1     0.4263    0.3927    0.4088      6470\n",
      "           2     0.6148    0.6538    0.6337     12977\n",
      "\n",
      "    accuracy                         0.5093     27438\n",
      "   macro avg     0.4750    0.4718    0.4729     27438\n",
      "weighted avg     0.5031    0.5093    0.5057     27438\n",
      "\n",
      "\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "Waiting for W&B process to finish... <strong style=\"color:green\">(success).</strong>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "bf111e8263e2418dae29e33640fdadb1",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "VBox(children=(Label(value='0.001 MB of 0.001 MB uploaded (0.000 MB deduped)\\r'), FloatProgress(value=1.0, max…"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "<style>\n",
       "    table.wandb td:nth-child(1) { padding: 0 10px; text-align: right }\n",
       "    .wandb-row { display: flex; flex-direction: row; flex-wrap: wrap; width: 100% }\n",
       "    .wandb-col { display: flex; flex-direction: column; flex-basis: 100%; flex: 1; padding: 10px; }\n",
       "    </style>\n",
       "<div class=\"wandb-row\"><div class=\"wandb-col\"><h3>Run history:</h3><br/><table class=\"wandb\"><tr><td>Epoch</td><td>▁▁▁▁▁▁▁▁▃▃▃▃▃▃▃▃▅▅▅▅▅▅▅▅▆▆▆▆▆▆▆▆████████</td></tr><tr><td>lr</td><td>▂▄███████▇▇▇▇▇▆▆▆▆▅▅▅▄▄▄▄▃▃▃▂▂▂▂▂▁▁▁▁▁▁▁</td></tr><tr><td>train_loss</td><td>█▇▇▇▄▅▄▅▃▃▁▂▆▂▃▁▁▂▂▄▇▁▂▂▃▂▁▃▂▃▇▂▁▃▃▂▄▃▁▁</td></tr><tr><td>valid_acc</td><td>▆█▁▃▄</td></tr><tr><td>valid_loss</td><td>▂▁▆▇█</td></tr></table><br/></div><div class=\"wandb-col\"><h3>Run summary:</h3><br/><table class=\"wandb\"><tr><td>Epoch</td><td>5</td></tr><tr><td>lr</td><td>0.0</td></tr><tr><td>train_loss</td><td>0.62207</td></tr><tr><td>valid_acc</td><td>0.57637</td></tr><tr><td>valid_loss</td><td>0.92114</td></tr></table><br/></div></div>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "Synced <strong style=\"color:#cdcd00\">distinctive-bird-129</strong>: <a href=\"https://wandb.ai/qftie/cped-emo-cls/runs/1f62d7lq\" target=\"_blank\">https://wandb.ai/qftie/cped-emo-cls/runs/1f62d7lq</a><br/>Synced 6 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "Find logs at: <code>./wandb/run-20220401_233425-1f62d7lq/logs</code>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Load the best checkpoint (saved on best validation macro-F1) and evaluate on the test set.\n",
    "model.load_state_dict(torch.load(\"output/ch-roberta-dorwardUt/pytorch_model.bin\"))\n",
    "# evaluate() returns (accuracy, mean loss, macro-F1); unpack explicitly instead of\n",
    "# binding the tuple to a prediction-sounding name (use predict() for class labels).\n",
    "test_acc, test_loss, test_macro_f1 = evaluate(model, test_loader, DEVICE)\n",
    "wandb.finish()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [],
   "source": [
    "# output_dir = 'output/ch-roberta-dorwardUt'\n",
    "# os.makedirs(output_dir, exist_ok=True)\n",
    "# torch.save(model.state_dict(), output_dir+\"/pytorch_model.bin\")\n",
    "# # torch.save(model, \"pytorch_model_whole.bin\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [],
   "source": [
    "# One call suffices: empty_cache() releases all unused cached GPU memory blocks,\n",
    "# so repeating it back-to-back has no additional effect.\n",
    "torch.cuda.empty_cache()"
   ]
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "49e5bd26eb5fb0a8ffb649f62262c127e261ba51230dc2578599ab5938abf7ca"
  },
  "kernelspec": {
   "display_name": "Python 3.8.0 64-bit ('torch17py38': conda)",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.0"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
