{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34m\u001b[1mwandb\u001b[0m: W&B API key is configured (use `wandb login --relogin` to force relogin)\n"
     ]
    }
   ],
   "source": [
    "!wandb login"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "os.environ['CUDA_VISIBLE_DEVICES']='2,3'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Failed to detect the name of this notebook, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable to enable code saving.\n",
      "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33mqftie\u001b[0m (use `wandb login --relogin` to force relogin)\n",
      "2022-05-09 16:20:46.605315: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory\n",
      "2022-05-09 16:20:46.605349: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "wandb version 0.12.16 is available!  To upgrade, please run:\n",
       " $ pip install wandb --upgrade"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "Tracking run with wandb version 0.12.11"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "Run data is saved locally in <code>/home/phd-fan.weiquan2/works/MMMTD_Benchmark/BERT-like-method-with-CPED/wandb/run-20220509_162042-yyrvwcwa</code>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "Syncing run <strong><a href=\"https://wandb.ai/qftie/cped-emo-cls/runs/yyrvwcwa\" target=\"_blank\">eternal-music-8</a></strong> to <a href=\"https://wandb.ai/qftie/cped-emo-cls\" target=\"_blank\">Weights & Biases</a> (<a href=\"https://wandb.me/run\" target=\"_blank\">docs</a>)<br/>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Inside my model training code\n",
    "import wandb\n",
    "\n",
    "wandb.init(project=\"cped-emo-cls\",entity='qftie',group='bert-cls3')\n",
    "\n",
    "config = wandb.config          # Initialize config\n",
    "config.batch_size = 32          # input batch size for training (default: 64)\n",
    "config.test_batch_size = 64    # input batch size for testing (default: 1000)\n",
    "config.epochs = 5             # number of epochs to train (default: 10)\n",
    "config.lr = 2e-5               # learning rate (default: 0.01)\n",
    "config.momentum = 0.1          # SGD momentum (default: 0.5) \n",
    "config.no_cuda = False         # disables CUDA training\n",
    "config.bert_path = 'hfl/chinese-roberta-wwm-ext'\n",
    "config.exam_name = 'roberta-forwardUt'\n",
    "config.max_seq_len = 500\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2022-05-09 16:21:05.658688: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory\n",
      "2022-05-09 16:21:05.658745: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.\n"
     ]
    }
   ],
   "source": [
     "import pandas as pd \n",
     "import numpy as np \n",
     "import json, time\n",
     "from tqdm import tqdm \n",
     "from sklearn.metrics import accuracy_score, classification_report, f1_score\n",
     "import torch\n",
     "from torch.utils.data import Dataset, DataLoader, TensorDataset \n",
     "import torch.nn as nn\n",
     "import torch.nn.functional as F\n",
     "import torch.optim as optim\n",
     "from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\n",
     "# NOTE(review): transformers' AdamW is deprecated in recent releases in favour\n",
     "# of torch.optim.AdamW -- confirm the installed version still ships it.\n",
     "from transformers import BertModel, BertConfig, AutoTokenizer, AdamW, get_cosine_schedule_with_warmup, AutoModelForSequenceClassification\n",
     "import warnings\n",
     "warnings.filterwarnings('ignore')\n",
     "\n",
     "bert_path = config.bert_path\n",
     "tokenizer = AutoTokenizer.from_pretrained(bert_path, truncation_side=\"left\")   # initialize the tokenizer; left-side truncation keeps the end of long inputs"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# seed everything"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import random\n",
    "\n",
     "def set_seed(seed):\n",
     "    \"\"\"Seed numpy, random and torch (CPU + all GPUs) and force cuDNN into\n",
     "    deterministic mode so runs are reproducible.\n",
     "    \"\"\"\n",
     "    np.random.seed(seed)\n",
     "    random.seed(seed)\n",
     "    torch.manual_seed(seed)\n",
     "    if torch.cuda.is_available():\n",
     "        torch.cuda.manual_seed(seed)\n",
     "        torch.cuda.manual_seed_all(seed)\n",
     "    # disable cuDNN autotuning / non-deterministic kernel selection\n",
     "    torch.backends.cudnn.benchmark = False\n",
     "    torch.backends.cudnn.deterministic = True\n",
    "\n",
    "set_seed(42)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 预处理数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # mmmtd版本\n",
    "# data_text = pd.read_csv('/148Dataset/data-tie.qianfeng/CPED/data_with_personality.csv')\n",
    "# data_text['tv'] = [int(s[:2]) for s in data_text['vvid']]\n",
    "# emo2id = {'中性情绪':0, '正向情绪':1, '负向情绪':2}\n",
    "# data_text['Emotion_3'] = [emo2id[x] for x in data_text['情绪(粗粒度)']]\n",
    "\n",
    "\n",
    "# # train_index = data_text['tv']<=26\n",
    "# # valid_index = (data_text['tv']==32) | (data_text['tv']>=39)\n",
    "# # test_index = ((data_text['tv']>=27) & (data_text['tv']<32)) | ((data_text['tv']>32) & (data_text['tv']<=37))\n",
    "\n",
    "# data_text_train = data_text[data_text['tv']<=26]\n",
    "# data_text_valid = data_text[(data_text['tv']==32) | (data_text['tv']>=39)]\n",
    "# data_text_test = data_text[((data_text['tv']>=27) & (data_text['tv']<32)) | ((data_text['tv']>32) & (data_text['tv']<=37))]\n",
    "\n",
    "# # data_text_train = data_text_train.sample(frac=1, random_state=2021)\n",
    "# # data_text_valid = data_text_valid.sample(frac=1, random_state=2021)\n",
    "# # data_text_test = data_text_test.sample(frac=1, random_state=2021)\n",
    "\n",
    "# x_train = data_text_train['说话内容'].tolist()\n",
    "# train_label = data_text_train['Emotion_3'].tolist()\n",
    "\n",
    "# x_valid = data_text_valid['说话内容'].tolist()\n",
    "# valid_label = data_text_valid['Emotion_3'].tolist()\n",
    "\n",
    "# x_test = data_text_test['说话内容'].tolist()\n",
    "# test_label = data_text_test['Emotion_3'].tolist()\n",
    "\n",
    "# train_encoding = tokenizer(x_train, truncation=True, padding=True, max_length=120)\n",
    "# # max_length对文本做截断\n",
    "# valid_encoding = tokenizer(x_valid, truncation=True, padding=True, max_length=120)\n",
    "# test_encoding = tokenizer(x_test, truncation=True, padding=True, max_length=120)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# CPED版本数据集，使用中性，正向，负向三分类\n",
    "# data_text = pd.read_csv('/148Dataset/data-tie.qianfeng/CPED/data_with_personality.csv')\n",
    "# data_text['tv'] = [int(s[:2]) for s in data_text['vvid']]\n",
    "emo2id = {'neutral':0, 'positive':1, 'negative':2}\n",
    "# data_text['Emotion_3'] = [emo2id[x] for x in data_text['Sentiment']]\n",
    "\n",
    "# train_index = data_text['tv']<=26\n",
    "# valid_index = (data_text['tv']==32) | (data_text['tv']>=39)\n",
    "# test_index = ((data_text['tv']>=27) & (data_text['tv']<32)) | ((data_text['tv']>32) & (data_text['tv']<=37))\n",
    "\n",
    "data_text_train = pd.read_csv('/148Dataset/Dataset/CPED/train_split.csv')\n",
    "data_text_valid = pd.read_csv('/148Dataset/Dataset/CPED/valid_split.csv')\n",
    "data_text_test = pd.read_csv('/148Dataset/Dataset/CPED/test_split.csv')\n",
    "\n",
    "# data_text_train['Speaker_Utterance'] = data_text_train['Speaker'] + ':' + data_text_train['Utterance']\n",
    "# data_text_valid['Speaker_Utterance'] = data_text_valid['Speaker'] + ':' + data_text_valid['Utterance']\n",
    "# data_text_test['Speaker_Utterance'] = data_text_test['Speaker'] + ':' + data_text_test['Utterance']\n",
    "\n",
    "# data_text_train = data_text_train.sample(frac=1, random_state=2021)\n",
    "# data_text_valid = data_text_valid.sample(frac=1, random_state=2021)\n",
    "# data_text_test = data_text_test.sample(frac=1, random_state=2021)\n",
    "\n",
    "x_train = (data_text_train['Utterance']).tolist()\n",
    "train_label = [emo2id[x] for x in data_text_train['Sentiment']]\n",
    "\n",
    "x_valid = data_text_valid['Utterance'].tolist()\n",
    "valid_label = [emo2id[x] for x in data_text_valid['Sentiment']]\n",
    "\n",
    "x_test = data_text_test['Utterance'].tolist()\n",
    "test_label = [emo2id[x] for x in data_text_test['Sentiment']]\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # CPED版本数据集，使用13分类映射后的7分类\n",
    "# # data_text = pd.read_csv('/148Dataset/data-tie.qianfeng/CPED/data_with_personality.csv')\n",
    "# # data_text['tv'] = [int(s[:2]) for s in data_text['vvid']]\n",
    "# emo2id = {'happy':0,'grateful':0, 'relaxed':0, 'positive-other':0, 'neutral':1, 'anger':2, 'sadness':3, 'depress':3, 'fear':4, 'worried':4, 'astonished':5, 'disgust':6, 'negative-other':6}\n",
    "# # data_text['Emotion_3'] = [emo2id[x] for x in data_text['Sentiment']]\n",
    "\n",
    "# # train_index = data_text['tv']<=26\n",
    "# # valid_index = (data_text['tv']==32) | (data_text['tv']>=39)\n",
    "# # test_index = ((data_text['tv']>=27) & (data_text['tv']<32)) | ((data_text['tv']>32) & (data_text['tv']<=37))\n",
    "\n",
    "# data_text_train = pd.read_csv('/148Dataset/Dataset/CPED/train_split.csv')\n",
    "# data_text_valid = pd.read_csv('/148Dataset/Dataset/CPED/valid_split.csv')\n",
    "# data_text_test = pd.read_csv('/148Dataset/Dataset/CPED/test_split.csv')\n",
    "\n",
    "# # data_text_train = data_text_train.sample(frac=1, random_state=2021)\n",
    "# # data_text_valid = data_text_valid.sample(frac=1, random_state=2021)\n",
    "# # data_text_test = data_text_test.sample(frac=1, random_state=2021)\n",
    "\n",
    "# x_train = data_text_train['Utterance'].tolist()\n",
    "# train_label = [emo2id[x] for x in data_text_train['Emotion']]\n",
    "\n",
    "# x_valid = data_text_valid['Utterance'].tolist()\n",
    "# valid_label = [emo2id[x] for x in data_text_valid['Emotion']]\n",
    "\n",
    "# x_test = data_text_test['Utterance'].tolist()\n",
    "# test_label = [emo2id[x] for x in data_text_test['Emotion']]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 准备forward utterance作为sentence"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# back_stride = int(data_text_valid['Utterance_ID'][50][-3:])\n",
    "# ' '.join(data_text_valid['Utterance'][ 50- back_stride: 50].tolist())\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>TV_ID</th>\n",
       "      <th>Dialogue_ID</th>\n",
       "      <th>Utterance_ID</th>\n",
       "      <th>Speaker</th>\n",
       "      <th>Gender</th>\n",
       "      <th>Age</th>\n",
       "      <th>Neuroticism</th>\n",
       "      <th>Extraversion</th>\n",
       "      <th>Openness</th>\n",
       "      <th>Agreeableness</th>\n",
       "      <th>Conscientiousness</th>\n",
       "      <th>Scene</th>\n",
       "      <th>FacePosition_LU</th>\n",
       "      <th>FacePosition_RD</th>\n",
       "      <th>Sentiment</th>\n",
       "      <th>Emotion</th>\n",
       "      <th>DA</th>\n",
       "      <th>Utterance</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>1</td>\n",
       "      <td>01_000</td>\n",
       "      <td>01_000_000</td>\n",
       "      <td>童文洁</td>\n",
       "      <td>female</td>\n",
       "      <td>middle-aged</td>\n",
       "      <td>high</td>\n",
       "      <td>high</td>\n",
       "      <td>low</td>\n",
       "      <td>low</td>\n",
       "      <td>high</td>\n",
       "      <td>other-venue</td>\n",
       "      <td>108_136</td>\n",
       "      <td>156_202</td>\n",
       "      <td>neutral</td>\n",
       "      <td>neutral</td>\n",
       "      <td>greeting</td>\n",
       "      <td>真巧</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>1</td>\n",
       "      <td>01_000</td>\n",
       "      <td>01_000_001</td>\n",
       "      <td>童文洁</td>\n",
       "      <td>female</td>\n",
       "      <td>middle-aged</td>\n",
       "      <td>high</td>\n",
       "      <td>high</td>\n",
       "      <td>low</td>\n",
       "      <td>low</td>\n",
       "      <td>high</td>\n",
       "      <td>other-venue</td>\n",
       "      <td>193_144</td>\n",
       "      <td>253_197</td>\n",
       "      <td>neutral</td>\n",
       "      <td>neutral</td>\n",
       "      <td>greeting</td>\n",
       "      <td>车没事了</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>1</td>\n",
       "      <td>01_000</td>\n",
       "      <td>01_000_002</td>\n",
       "      <td>刘静</td>\n",
       "      <td>female</td>\n",
       "      <td>middle-aged</td>\n",
       "      <td>low</td>\n",
       "      <td>high</td>\n",
       "      <td>high</td>\n",
       "      <td>high</td>\n",
       "      <td>high</td>\n",
       "      <td>other-venue</td>\n",
       "      <td>0_0</td>\n",
       "      <td>0_0</td>\n",
       "      <td>neutral</td>\n",
       "      <td>neutral</td>\n",
       "      <td>answer</td>\n",
       "      <td>是你呀 没事了没事 谢谢你</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>1</td>\n",
       "      <td>01_000</td>\n",
       "      <td>01_000_003</td>\n",
       "      <td>童文洁</td>\n",
       "      <td>female</td>\n",
       "      <td>middle-aged</td>\n",
       "      <td>high</td>\n",
       "      <td>high</td>\n",
       "      <td>low</td>\n",
       "      <td>low</td>\n",
       "      <td>high</td>\n",
       "      <td>other-venue</td>\n",
       "      <td>0_0</td>\n",
       "      <td>0_0</td>\n",
       "      <td>neutral</td>\n",
       "      <td>neutral</td>\n",
       "      <td>question</td>\n",
       "      <td>没事没事 你也去春风</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>1</td>\n",
       "      <td>01_001</td>\n",
       "      <td>01_001_000</td>\n",
       "      <td>刘静</td>\n",
       "      <td>female</td>\n",
       "      <td>middle-aged</td>\n",
       "      <td>low</td>\n",
       "      <td>high</td>\n",
       "      <td>high</td>\n",
       "      <td>high</td>\n",
       "      <td>high</td>\n",
       "      <td>other-venue</td>\n",
       "      <td>0_0</td>\n",
       "      <td>0_0</td>\n",
       "      <td>neutral</td>\n",
       "      <td>neutral</td>\n",
       "      <td>answer</td>\n",
       "      <td>对</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   TV_ID Dialogue_ID Utterance_ID Speaker  Gender          Age Neuroticism  \\\n",
       "0      1      01_000   01_000_000     童文洁  female  middle-aged        high   \n",
       "1      1      01_000   01_000_001     童文洁  female  middle-aged        high   \n",
       "2      1      01_000   01_000_002      刘静  female  middle-aged         low   \n",
       "3      1      01_000   01_000_003     童文洁  female  middle-aged        high   \n",
       "4      1      01_001   01_001_000      刘静  female  middle-aged         low   \n",
       "\n",
       "  Extraversion Openness Agreeableness Conscientiousness        Scene  \\\n",
       "0         high      low           low              high  other-venue   \n",
       "1         high      low           low              high  other-venue   \n",
       "2         high     high          high              high  other-venue   \n",
       "3         high      low           low              high  other-venue   \n",
       "4         high     high          high              high  other-venue   \n",
       "\n",
       "  FacePosition_LU FacePosition_RD Sentiment  Emotion        DA      Utterance  \n",
       "0         108_136         156_202   neutral  neutral  greeting             真巧  \n",
       "1         193_144         253_197   neutral  neutral  greeting           车没事了  \n",
       "2             0_0             0_0   neutral  neutral    answer  是你呀 没事了没事 谢谢你  \n",
       "3             0_0             0_0   neutral  neutral  question     没事没事 你也去春风  \n",
       "4             0_0             0_0   neutral  neutral    answer              对  "
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "data_text_train.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "max_seq_len = config.max_seq_len\n",
    "\n",
    "# utt_forward_train = []\n",
    "# for i in range(len(data_text_train)):\n",
    "#     back_stride = int(data_text_train['Utterance_ID'][i][-3:])\n",
    "#     utt_forward = '.'.join(data_text_train['Speaker_Utterance'][ i- back_stride: i].tolist())\n",
    "#     max_forward_len = max_seq_len - len(data_text_train['Utterance'][i])\n",
    "#     utt_forward = utt_forward[-max_forward_len:]\n",
    "#     utt_forward_train.append(utt_forward)\n",
    "\n",
    "# utt_forward_valid = []\n",
    "# for i in range(len(data_text_valid)):\n",
    "#     back_stride = int(data_text_valid['Utterance_ID'][i][-3:])\n",
    "#     utt_forward = '.'.join(data_text_valid['Speaker_Utterance'][ i- back_stride: i].tolist())\n",
    "#     max_forward_len = max_seq_len - len(data_text_valid['Utterance'][i])\n",
    "#     utt_forward = utt_forward[-max_forward_len:]\n",
    "#     utt_forward_valid.append(utt_forward)\n",
    "\n",
    "# utt_forward_test = []\n",
    "# for i in range(len(data_text_test)):\n",
    "#     back_stride = int(data_text_test['Utterance_ID'][i][-3:])\n",
    "#     utt_forward = '.'.join(data_text_test['Speaker_Utterance'][ i- back_stride: i].tolist())\n",
    "#     max_forward_len = max_seq_len - len(data_text_test['Utterance'][i])\n",
    "#     utt_forward = utt_forward[-max_forward_len:]    \n",
    "#     utt_forward_test.append(utt_forward)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['真巧',\n",
       " '车没事了',\n",
       " '是你呀 没事了没事 谢谢你',\n",
       " '没事没事 你也去春风',\n",
       " '对',\n",
       " '你孩子也在这上学',\n",
       " '对呀',\n",
       " '真巧 我儿子也在这',\n",
       " '你孩子几年级',\n",
       " '高三了']"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x_train[:10]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "# utt_forward_train[:10]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "# print(x_valid_forward_utt)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## tokenize"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "# for i in range(len(x_train)):\n",
    "#     x_train[i] = utt_forward_train[i] + '[SEP]' + x_train[i]\n",
    "\n",
    "# for i in range(len(x_valid)):\n",
    "#     x_valid[i] = utt_forward_valid[i]+ '[SEP]' + x_valid[i] \n",
    "\n",
    "# for i in range(len(x_test)):\n",
    "#     x_test[i] = utt_forward_test[i] + '[SEP]' + x_test[i]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "155"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "max(map(len,x_train))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Tokenize with truncation and padding; max_length caps sequences at 512 tokens.\n",
     "# NOTE(review): config.max_seq_len is 500 but 512 is hard-coded here -- confirm\n",
     "# which limit is intended.\n",
     "train_encoding = tokenizer(x_train, truncation=True, padding=True, max_length=512)\n",
     "valid_encoding = tokenizer(x_valid, truncation=True, padding=True, max_length=512)\n",
     "test_encoding = tokenizer(x_test, truncation=True, padding=True, max_length=512)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 准备训练集，验证集，测试集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Dataset wrapper: exposes the tokenizer output as per-sample dicts of tensors\n",
     "# (input_ids / attention_mask / token_type_ids plus a 'labels' entry).\n",
     "class NewsDataset(Dataset):\n",
     "    def __init__(self, encodings, labels):\n",
     "        # encodings: BatchEncoding from the tokenizer; labels: list of int class ids\n",
     "        self.encodings = encodings\n",
     "        self.labels = labels\n",
     "    \n",
     "    # Fetch a single sample as a dict of tensors\n",
     "    def __getitem__(self, idx):\n",
     "        item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}\n",
     "        item['labels'] = torch.tensor(int(self.labels[idx]))\n",
     "        return item\n",
     "    \n",
     "    def __len__(self):\n",
     "        return len(self.labels)\n",
     "\n",
     "train_dataset = NewsDataset(train_encoding, train_label)\n",
     "valid_dataset = NewsDataset(valid_encoding, valid_label)\n",
     "test_dataset = NewsDataset(test_encoding, test_label)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## WeightedRandomSampler"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "# labels = [train_dataset[i]['labels'] for i in range(len(train_dataset))]\n",
    "# # labels[20:30]\n",
    "# # train_label[20:30]\n",
    "# weights = [1 if label == 2 else 2 for label in labels]\n",
    "# # len(train_dataset)\n",
    "# # weights[20:30]\n",
    "# from torch.utils.data.sampler import  WeightedRandomSampler\n",
    "# sampler = WeightedRandomSampler(weights,\\\n",
    "#                                 num_samples=70000,\\\n",
    "#                                 replacement=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 加载到torch的dataloader"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 单个读取到批量读取\n",
    "train_loader = DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True)\n",
    "\n",
    "# train_loader = DataLoader(train_dataset, batch_size=config.batch_size, sampler=sampler)\n",
    "valid_loader = DataLoader(valid_dataset, batch_size=config.batch_size, shuffle=True)\n",
    "test_loader = DataLoader(test_dataset, batch_size=config.test_batch_size, shuffle=True)\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 定义bert模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
     "\n",
     "# Model definition: BERT/RoBERTa encoder + small MLP classification head.\n",
     "class Bert_Model(nn.Module):\n",
     "    def __init__(self, bert_path, classes=3):\n",
     "        super(Bert_Model, self).__init__()\n",
     "        self.config = BertConfig.from_pretrained(bert_path)  # load model hyper-parameters\n",
     "        self.bert = BertModel.from_pretrained(bert_path, )     # load pretrained weights\n",
     "        # NOTE(review): fc / dense / activation are defined but never used in\n",
     "        # forward(); they still add randomly-initialised entries to the\n",
     "        # state_dict, so removing them would invalidate saved checkpoints.\n",
     "        self.fc = nn.Linear(self.config.hidden_size, classes)  # direct classification head (unused)\n",
     "        self.dense = nn.Linear(self.config.hidden_size, self.config.hidden_size)\n",
     "        self.activation = nn.Tanh()\n",
     "        # prediction head actually used: hidden_size -> 64 -> dropout -> classes\n",
     "        self.pred = nn.Sequential(\n",
     "            nn.Linear(self.config.hidden_size, 64),\n",
     "            nn.Dropout(0.1),\n",
     "            nn.Linear(64, classes)\n",
     "        )\n",
     "\n",
     "    def forward(self, input_ids, attention_mask=None, token_type_ids=None):\n",
     "        # Returns raw logits of shape [batch, classes] (no softmax applied).\n",
     "        outputs = self.bert(input_ids, attention_mask, token_type_ids, output_hidden_states= True, return_dict=True)\n",
     "        # out_pool = outputs[1]   # pooled CLS output [bs, config.hidden_size]\n",
     "        # Mean-pool last hidden states over the sequence dimension.\n",
     "        # NOTE(review): the mean includes padding positions (attention_mask is\n",
     "        # not applied here) -- confirm this is intentional.\n",
     "        out_pool = torch.mean(outputs.last_hidden_state, 1)\n",
     "        # out_pool = self.dense(out_pool)\n",
     "        # out_pool = self.activation(out_pool)\n",
     "        logit = self.pred(out_pool)   #  [bs, classes]\n",
     "        return logit"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 实例化bert模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of the model checkpoint at hfl/chinese-roberta-wwm-ext were not used when initializing BertModel: ['cls.predictions.transform.LayerNorm.weight', 'cls.predictions.decoder.weight', 'cls.predictions.transform.dense.weight', 'cls.predictions.transform.LayerNorm.bias', 'cls.seq_relationship.bias', 'cls.predictions.transform.dense.bias', 'cls.seq_relationship.weight', 'cls.predictions.bias']\n",
      "- This IS expected if you are initializing BertModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing BertModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Total parameters: 102909958, Trainable parameters: 102909958\n"
     ]
    }
   ],
   "source": [
     "def get_parameter_number(model):\n",
     "    \"\"\"Return a string with the total and trainable parameter counts.\"\"\"\n",
     "    total_num = sum(p.numel() for p in model.parameters())\n",
     "    trainable_num = sum(p.numel() for p in model.parameters() if p.requires_grad)\n",
     "    return 'Total parameters: {}, Trainable parameters: {}'.format(total_num, trainable_num)\n",
     "\n",
     "DEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
     "EPOCHS = config.epochs\n",
     "model = Bert_Model(bert_path)\n",
     "model = nn.DataParallel(model)  # data-parallel over the GPUs visible via CUDA_VISIBLE_DEVICES\n",
     "model = model.cuda()  # NOTE(review): hard-fails on a CPU-only machine despite the DEVICE fallback above\n",
     "print(get_parameter_number(model))\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 优化器定义"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [],
   "source": [
    "optimizer = AdamW(model.parameters(), lr=config.lr, weight_decay=1e-4) #AdamW优化器\n",
    "scheduler = get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=0.3*len(train_loader),\n",
    "                                            num_training_steps=EPOCHS*len(train_loader))\n",
    "# 学习率先线性warmup一个epoch，然后cosine式下降。\n",
    "# 这里给个小提示，一定要加warmup（学习率从0慢慢升上去），如果把warmup去掉，可能收敛不了。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # 准备放入多卡环境\n",
    "# model, optimizer, train_loader = accelerator.prepare(model, optimizer, train_loader)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 定义训练函数和验证测试函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Evaluate model performance on a labelled loader (used on the validation set).\n",
     "def evaluate(model, data_loader, device):\n",
     "    \"\"\"Return (accuracy, mean loss, macro-F1) over `data_loader`.\n",
     "\n",
     "    Also prints a full per-class classification report.\n",
     "    \"\"\"\n",
     "    model.eval()\n",
     "    criterion = nn.CrossEntropyLoss()\n",
     "    val_true, val_pred = [], []\n",
     "    valid_loss_sum = 0.0\n",
     "    with torch.no_grad():\n",
     "        for idx, batch in enumerate(data_loader):\n",
     "            y_pred = model(batch['input_ids'].to(device), batch['attention_mask'].to(device), batch['token_type_ids'].to(device))\n",
     "            loss = criterion(y_pred, batch['labels'].to(device))\n",
     "            # argmax over logits -> predicted class ids\n",
     "            y_pred = torch.argmax(y_pred, dim=1).detach().cpu().numpy().tolist()\n",
     "            val_pred.extend(y_pred)\n",
     "            val_true.extend(batch['labels'].cpu().numpy().tolist())\n",
     "            valid_loss_sum += loss.item()\n",
     "            \n",
     "    print(classification_report(val_true, val_pred, digits=4))\n",
     "    return accuracy_score(val_true, val_pred), valid_loss_sum/len(data_loader), f1_score(val_true, val_pred, average='macro')  # returns accuracy, loss, f1-macro\n",
    "\n",
    "\n",
     "# Label-free inference helper (e.g. for building a submission file).\n",
     "# NOTE(review): the original comment claimed the test set has no labels, but\n",
     "# test_label is built above -- this helper simply ignores labels.\n",
     "def predict(model, data_loader, device):\n",
     "    \"\"\"Return the list of predicted class ids for all samples in `data_loader`.\"\"\"\n",
     "    model.eval()\n",
     "    val_pred = []\n",
     "    with torch.no_grad():\n",
     "        for batch in data_loader:\n",
     "            y_pred = model(batch['input_ids'].to(device), batch['attention_mask'].to(device), batch['token_type_ids'].to(device))\n",
     "            y_pred = torch.argmax(y_pred, dim=1).detach().cpu().numpy().tolist()\n",
     "            val_pred.extend(y_pred)\n",
     "    return val_pred\n",
    "\n",
    "\n",
    "def train_and_eval(model, train_loader, valid_loader, \n",
    "                   optimizer, scheduler, device, epoch):\n",
    "    \"\"\"Train for `epoch` epochs, validating after each epoch.\n",
    "\n",
    "    Logs train loss / learning rate to wandb during training and validation\n",
    "    metrics after each epoch; saves the model weights whenever the\n",
    "    validation macro-F1 improves.\n",
    "    \"\"\"\n",
    "    best_macro_f1 = 0\n",
    "    b = 0.6  # flood level for the flooding regularizer (Ishida et al., 2020)\n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    # Log/print about 20 times per epoch; max(1, ...) avoids a\n",
    "    # ZeroDivisionError when the loader has fewer than 20 batches.\n",
    "    log_every = max(1, len(train_loader) // 20)\n",
    "    for i in range(epoch):\n",
    "        # ---- training ----\n",
    "        start = time.time()\n",
    "        model.train()\n",
    "        print(\"***** Running training epoch {} *****\".format(i+1))\n",
    "        train_loss_sum = 0.0\n",
    "        for idx, batch in enumerate(train_loader):\n",
    "            ids = batch['input_ids'].to(device)\n",
    "            att = batch['attention_mask'].to(device)\n",
    "            tpe = batch['token_type_ids'].to(device)\n",
    "            y = batch['labels'].to(device)\n",
    "            y_pred = model(ids, att, tpe)\n",
    "            loss = criterion(y_pred, y)\n",
    "            # Flooding: keep the training loss from dropping below b to curb overfitting\n",
    "            loss = (loss - b).abs() + b\n",
    "            step_lr = np.array([param_group[\"lr\"] for param_group in optimizer.param_groups]).mean()\n",
    "            optimizer.zero_grad()\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "            scheduler.step()   # per-step learning-rate schedule\n",
    "\n",
    "            train_loss_sum += loss.item()\n",
    "            if (idx + 1) % log_every == 0:\n",
    "                wandb.log({\n",
    "                            'Epoch': i+1, \n",
    "                            'train_loss': loss.item(),  # log a plain float, not a live CUDA tensor\n",
    "                            'lr': step_lr\n",
    "                            })\n",
    "                print(\"Epoch {:04d} | Step {:04d}/{:04d} | Loss {:.4f} | Time {:.4f}\".format(\n",
    "                          i+1, idx+1, len(train_loader), train_loss_sum/(idx+1), time.time() - start))\n",
    "\n",
    "        # ---- validation ----\n",
    "        model.eval()\n",
    "        acc, valid_loss, valid_macro_f1 = evaluate(model, valid_loader, device)  # (accuracy, loss, macro-F1)\n",
    "        wandb.log({'valid_acc': acc, 'valid_loss': valid_loss})\n",
    "        # Keep only the checkpoint with the best validation macro-F1\n",
    "        if valid_macro_f1 > best_macro_f1:\n",
    "            best_macro_f1 = valid_macro_f1\n",
    "            torch.save(model.state_dict(), 'output/ch-roberta-dorwardUt/pytorch_model.bin') \n",
    "\n",
    "        print(\"current macro_f1 is {:.4f}, best macro_f1 is {:.4f}\".format(valid_macro_f1, best_macro_f1))\n",
    "        print(\"time costed = {}s \\n\".format(round(time.time() - start, 5)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 训练和验证模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "***** Running training epoch 1 *****\n",
      "Epoch 0001 | Step 0147/2944 | Loss 1.0560 | Time 36.6259\n",
      "Epoch 0001 | Step 0294/2944 | Loss 1.0224 | Time 68.6368\n",
      "Epoch 0001 | Step 0441/2944 | Loss 1.0049 | Time 101.2907\n",
      "Epoch 0001 | Step 0588/2944 | Loss 0.9940 | Time 134.0264\n",
      "Epoch 0001 | Step 0735/2944 | Loss 0.9843 | Time 166.2076\n",
      "Epoch 0001 | Step 0882/2944 | Loss 0.9787 | Time 198.9985\n",
      "Epoch 0001 | Step 1029/2944 | Loss 0.9758 | Time 231.1862\n",
      "Epoch 0001 | Step 1176/2944 | Loss 0.9725 | Time 264.1201\n",
      "Epoch 0001 | Step 1323/2944 | Loss 0.9702 | Time 296.3375\n",
      "Epoch 0001 | Step 1470/2944 | Loss 0.9676 | Time 329.3146\n",
      "Epoch 0001 | Step 1617/2944 | Loss 0.9651 | Time 361.7172\n",
      "Epoch 0001 | Step 1764/2944 | Loss 0.9613 | Time 394.6869\n",
      "Epoch 0001 | Step 1911/2944 | Loss 0.9587 | Time 426.9989\n",
      "Epoch 0001 | Step 2058/2944 | Loss 0.9573 | Time 459.3225\n",
      "Epoch 0001 | Step 2205/2944 | Loss 0.9555 | Time 491.6429\n",
      "Epoch 0001 | Step 2352/2944 | Loss 0.9543 | Time 523.9095\n",
      "Epoch 0001 | Step 2499/2944 | Loss 0.9526 | Time 556.8356\n",
      "Epoch 0001 | Step 2646/2944 | Loss 0.9517 | Time 589.1449\n",
      "Epoch 0001 | Step 2793/2944 | Loss 0.9504 | Time 621.3472\n",
      "Epoch 0001 | Step 2940/2944 | Loss 0.9483 | Time 654.3088\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0     0.4266    0.4741    0.4491      3126\n",
      "           1     0.4647    0.1283    0.2010      1692\n",
      "           2     0.6762    0.7701    0.7201      6319\n",
      "\n",
      "    accuracy                         0.5895     11137\n",
      "   macro avg     0.5225    0.4575    0.4567     11137\n",
      "weighted avg     0.5740    0.5895    0.5652     11137\n",
      "\n",
      "current macro_f1 is 0.4567, best macro_f1 is 0.4567\n",
      "time costed = 687.01094s \n",
      "\n",
      "***** Running training epoch 2 *****\n",
      "Epoch 0002 | Step 0147/2944 | Loss 0.8668 | Time 32.3262\n",
      "Epoch 0002 | Step 0294/2944 | Loss 0.8683 | Time 64.5705\n",
      "Epoch 0002 | Step 0441/2944 | Loss 0.8621 | Time 97.4523\n",
      "Epoch 0002 | Step 0588/2944 | Loss 0.8652 | Time 129.7202\n",
      "Epoch 0002 | Step 0735/2944 | Loss 0.8690 | Time 162.0825\n",
      "Epoch 0002 | Step 0882/2944 | Loss 0.8713 | Time 194.3202\n",
      "Epoch 0002 | Step 1029/2944 | Loss 0.8715 | Time 226.6785\n",
      "Epoch 0002 | Step 1176/2944 | Loss 0.8715 | Time 259.5341\n",
      "Epoch 0002 | Step 1323/2944 | Loss 0.8713 | Time 291.7876\n",
      "Epoch 0002 | Step 1470/2944 | Loss 0.8705 | Time 324.0552\n",
      "Epoch 0002 | Step 1617/2944 | Loss 0.8701 | Time 356.9303\n",
      "Epoch 0002 | Step 1764/2944 | Loss 0.8703 | Time 389.2574\n",
      "Epoch 0002 | Step 1911/2944 | Loss 0.8704 | Time 422.2896\n",
      "Epoch 0002 | Step 2058/2944 | Loss 0.8704 | Time 454.5943\n",
      "Epoch 0002 | Step 2205/2944 | Loss 0.8699 | Time 486.9090\n",
      "Epoch 0002 | Step 2352/2944 | Loss 0.8697 | Time 519.2231\n",
      "Epoch 0002 | Step 2499/2944 | Loss 0.8694 | Time 551.5752\n",
      "Epoch 0002 | Step 2646/2944 | Loss 0.8690 | Time 584.5205\n",
      "Epoch 0002 | Step 2793/2944 | Loss 0.8685 | Time 616.8569\n",
      "Epoch 0002 | Step 2940/2944 | Loss 0.8687 | Time 649.7402\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0     0.4292    0.4472    0.4380      3126\n",
      "           1     0.4162    0.1791    0.2504      1692\n",
      "           2     0.6753    0.7644    0.7171      6319\n",
      "\n",
      "    accuracy                         0.5864     11137\n",
      "   macro avg     0.5069    0.4636    0.4685     11137\n",
      "weighted avg     0.5669    0.5864    0.5679     11137\n",
      "\n",
      "current macro_f1 is 0.4685, best macro_f1 is 0.4685\n",
      "time costed = 682.72585s \n",
      "\n",
      "***** Running training epoch 3 *****\n",
      "Epoch 0003 | Step 0147/2944 | Loss 0.7623 | Time 32.3692\n",
      "Epoch 0003 | Step 0294/2944 | Loss 0.7582 | Time 65.2797\n",
      "Epoch 0003 | Step 0441/2944 | Loss 0.7567 | Time 97.5976\n",
      "Epoch 0003 | Step 0588/2944 | Loss 0.7538 | Time 129.9314\n",
      "Epoch 0003 | Step 0735/2944 | Loss 0.7543 | Time 162.9155\n",
      "Epoch 0003 | Step 0882/2944 | Loss 0.7541 | Time 195.8294\n",
      "Epoch 0003 | Step 1029/2944 | Loss 0.7542 | Time 228.7288\n",
      "Epoch 0003 | Step 1176/2944 | Loss 0.7538 | Time 261.0268\n",
      "Epoch 0003 | Step 1323/2944 | Loss 0.7553 | Time 293.3617\n",
      "Epoch 0003 | Step 1470/2944 | Loss 0.7549 | Time 326.2916\n",
      "Epoch 0003 | Step 1617/2944 | Loss 0.7554 | Time 358.5939\n",
      "Epoch 0003 | Step 1764/2944 | Loss 0.7560 | Time 390.9303\n",
      "Epoch 0003 | Step 1911/2944 | Loss 0.7551 | Time 423.2484\n",
      "Epoch 0003 | Step 2058/2944 | Loss 0.7554 | Time 455.5804\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "wandb: Network error (ReadTimeout), entering retry loop.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0003 | Step 2205/2944 | Loss 0.7555 | Time 488.5336\n",
      "Epoch 0003 | Step 2352/2944 | Loss 0.7555 | Time 520.8883\n",
      "Epoch 0003 | Step 2499/2944 | Loss 0.7553 | Time 553.2044\n",
      "Epoch 0003 | Step 2646/2944 | Loss 0.7541 | Time 585.5551\n",
      "Epoch 0003 | Step 2793/2944 | Loss 0.7536 | Time 617.8601\n",
      "Epoch 0003 | Step 2940/2944 | Loss 0.7528 | Time 650.7858\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0     0.4063    0.5003    0.4485      3126\n",
      "           1     0.3670    0.2488    0.2966      1692\n",
      "           2     0.6958    0.6762    0.6859      6319\n",
      "\n",
      "    accuracy                         0.5619     11137\n",
      "   macro avg     0.4897    0.4751    0.4770     11137\n",
      "weighted avg     0.5646    0.5619    0.5601     11137\n",
      "\n",
      "current macro_f1 is 0.4770, best macro_f1 is 0.4770\n",
      "time costed = 683.18004s \n",
      "\n",
      "***** Running training epoch 4 *****\n",
      "Epoch 0004 | Step 0147/2944 | Loss 0.6889 | Time 32.2676\n",
      "Epoch 0004 | Step 0294/2944 | Loss 0.6871 | Time 65.1942\n",
      "Epoch 0004 | Step 0441/2944 | Loss 0.6864 | Time 97.5459\n",
      "Epoch 0004 | Step 0588/2944 | Loss 0.6865 | Time 130.4640\n",
      "Epoch 0004 | Step 0735/2944 | Loss 0.6862 | Time 162.7648\n",
      "Epoch 0004 | Step 0882/2944 | Loss 0.6844 | Time 195.6876\n",
      "Epoch 0004 | Step 1029/2944 | Loss 0.6835 | Time 227.9828\n",
      "Epoch 0004 | Step 1176/2944 | Loss 0.6830 | Time 260.9164\n",
      "Epoch 0004 | Step 1323/2944 | Loss 0.6832 | Time 293.8563\n",
      "Epoch 0004 | Step 1470/2944 | Loss 0.6830 | Time 326.1493\n",
      "Epoch 0004 | Step 1617/2944 | Loss 0.6825 | Time 358.4358\n",
      "Epoch 0004 | Step 1764/2944 | Loss 0.6827 | Time 390.7477\n",
      "Epoch 0004 | Step 1911/2944 | Loss 0.6827 | Time 423.0427\n",
      "Epoch 0004 | Step 2058/2944 | Loss 0.6837 | Time 455.9676\n",
      "Epoch 0004 | Step 2205/2944 | Loss 0.6830 | Time 488.2987\n",
      "Epoch 0004 | Step 2352/2944 | Loss 0.6833 | Time 520.5746\n",
      "Epoch 0004 | Step 2499/2944 | Loss 0.6833 | Time 553.4997\n",
      "Epoch 0004 | Step 2646/2944 | Loss 0.6831 | Time 586.4383\n",
      "Epoch 0004 | Step 2793/2944 | Loss 0.6838 | Time 618.7363\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "wandb: Network error (ReadTimeout), entering retry loop.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0004 | Step 2940/2944 | Loss 0.6837 | Time 651.7292\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0     0.4081    0.4463    0.4263      3126\n",
      "           1     0.3339    0.2388    0.2784      1692\n",
      "           2     0.6855    0.7061    0.6957      6319\n",
      "\n",
      "    accuracy                         0.5622     11137\n",
      "   macro avg     0.4758    0.4637    0.4668     11137\n",
      "weighted avg     0.5542    0.5622    0.5567     11137\n",
      "\n",
      "current macro_f1 is 0.4668, best macro_f1 is 0.4770\n",
      "time costed = 680.92847s \n",
      "\n",
      "***** Running training epoch 5 *****\n",
      "Epoch 0005 | Step 0147/2944 | Loss 0.6666 | Time 32.3134\n",
      "Epoch 0005 | Step 0294/2944 | Loss 0.6680 | Time 65.2539\n",
      "Epoch 0005 | Step 0441/2944 | Loss 0.6716 | Time 98.1224\n",
      "Epoch 0005 | Step 0588/2944 | Loss 0.6737 | Time 130.3850\n",
      "Epoch 0005 | Step 0735/2944 | Loss 0.6743 | Time 163.2994\n",
      "Epoch 0005 | Step 0882/2944 | Loss 0.6738 | Time 196.1882\n",
      "Epoch 0005 | Step 1029/2944 | Loss 0.6744 | Time 228.4969\n",
      "Epoch 0005 | Step 1176/2944 | Loss 0.6732 | Time 261.4477\n",
      "Epoch 0005 | Step 1323/2944 | Loss 0.6730 | Time 293.7308\n",
      "Epoch 0005 | Step 1470/2944 | Loss 0.6735 | Time 326.0031\n",
      "Epoch 0005 | Step 1617/2944 | Loss 0.6735 | Time 358.3508\n",
      "Epoch 0005 | Step 1764/2944 | Loss 0.6731 | Time 390.6439\n",
      "Epoch 0005 | Step 1911/2944 | Loss 0.6726 | Time 423.5422\n",
      "Epoch 0005 | Step 2058/2944 | Loss 0.6721 | Time 455.8175\n",
      "Epoch 0005 | Step 2205/2944 | Loss 0.6718 | Time 488.7860\n",
      "Epoch 0005 | Step 2352/2944 | Loss 0.6718 | Time 521.0911\n",
      "Epoch 0005 | Step 2499/2944 | Loss 0.6719 | Time 553.9668\n",
      "Epoch 0005 | Step 2646/2944 | Loss 0.6716 | Time 586.8802\n",
      "Epoch 0005 | Step 2793/2944 | Loss 0.6713 | Time 619.1754\n",
      "Epoch 0005 | Step 2940/2944 | Loss 0.6711 | Time 651.4756\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0     0.3991    0.4789    0.4354      3126\n",
      "           1     0.3264    0.2506    0.2835      1692\n",
      "           2     0.6930    0.6675    0.6800      6319\n",
      "\n",
      "    accuracy                         0.5512     11137\n",
      "   macro avg     0.4728    0.4657    0.4663     11137\n",
      "weighted avg     0.5548    0.5512    0.5511     11137\n",
      "\n",
      "current macro_f1 is 0.4663, best macro_f1 is 0.4770\n",
      "time costed = 680.68402s \n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Run training, validating after each epoch\n",
    "train_and_eval(model, train_loader, valid_loader, optimizer, scheduler, DEVICE, EPOCHS)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 加载最优模型测试"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0     0.3800    0.4946    0.4298      7991\n",
      "           1     0.4144    0.2686    0.3260      6470\n",
      "           2     0.6028    0.5967    0.5997     12977\n",
      "\n",
      "    accuracy                         0.4896     27438\n",
      "   macro avg     0.4657    0.4533    0.4518     27438\n",
      "weighted avg     0.4935    0.4896    0.4857     27438\n",
      "\n",
      "\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "Waiting for W&B process to finish... <strong style=\"color:green\">(success).</strong>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "6ed28550404d42ebbeccd550e0a422f4",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "VBox(children=(Label(value='0.001 MB of 0.001 MB uploaded (0.000 MB deduped)\\r'), FloatProgress(value=1.0, max…"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "<style>\n",
       "    table.wandb td:nth-child(1) { padding: 0 10px; text-align: right }\n",
       "    .wandb-row { display: flex; flex-direction: row; flex-wrap: wrap; width: 100% }\n",
       "    .wandb-col { display: flex; flex-direction: column; flex-basis: 100%; flex: 1; padding: 10px; }\n",
       "    </style>\n",
       "<div class=\"wandb-row\"><div class=\"wandb-col\"><h3>Run history:</h3><br/><table class=\"wandb\"><tr><td>Epoch</td><td>▁▁▁▁▁▁▁▁▃▃▃▃▃▃▃▃▅▅▅▅▅▅▅▅▆▆▆▆▆▆▆▆████████</td></tr><tr><td>lr</td><td>▂▄███████▇▇▇▇▇▆▆▆▆▅▅▅▄▄▄▄▃▃▃▂▂▂▂▂▁▁▁▁▁▁▁</td></tr><tr><td>train_loss</td><td>▆▆▆█▅▆▅▅▅▅▆▃█▆▇▃▅▅▂▁▇▆▂▃▁▂▁▂▃▂▃▄▂▁▁▁▃▁▂▁</td></tr><tr><td>valid_acc</td><td>█▇▃▃▁</td></tr><tr><td>valid_loss</td><td>▁▁▆▇█</td></tr></table><br/></div><div class=\"wandb-col\"><h3>Run summary:</h3><br/><table class=\"wandb\"><tr><td>Epoch</td><td>5</td></tr><tr><td>lr</td><td>0.0</td></tr><tr><td>train_loss</td><td>0.60229</td></tr><tr><td>valid_acc</td><td>0.55123</td></tr><tr><td>valid_loss</td><td>0.96711</td></tr></table><br/></div></div>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "Synced <strong style=\"color:#cdcd00\">eternal-music-8</strong>: <a href=\"https://wandb.ai/qftie/cped-emo-cls/runs/yyrvwcwa\" target=\"_blank\">https://wandb.ai/qftie/cped-emo-cls/runs/yyrvwcwa</a><br/>Synced 6 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/html": [
       "Find logs at: <code>./wandb/run-20220509_162042-yyrvwcwa/logs</code>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Load the best checkpoint and evaluate on the test set\n",
    "model.load_state_dict(torch.load(\"output/ch-roberta-dorwardUt/pytorch_model.bin\"))\n",
    "# evaluate() prints a classification report and returns (accuracy, loss, macro_f1);\n",
    "# the old name `pred_test` wrongly implied it returned predictions (use predict() for that).\n",
    "test_acc, test_loss, test_macro_f1 = evaluate(model, test_loader, DEVICE)\n",
    "wandb.finish()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "# output_dir = 'output/ch-roberta-dorwardUt'\n",
    "# os.makedirs(output_dir, exist_ok=True)\n",
    "# torch.save(model.state_dict(), output_dir+\"/pytorch_model.bin\")\n",
    "# # torch.save(model, \"pytorch_model_whole.bin\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Release unreferenced cached GPU memory back to the driver.\n",
    "# One call suffices: repeated calls are no-ops.\n",
    "torch.cuda.empty_cache()"
   ]
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "49e5bd26eb5fb0a8ffb649f62262c127e261ba51230dc2578599ab5938abf7ca"
  },
  "kernelspec": {
   "display_name": "Python 3.8.0 64-bit ('torch17py38': conda)",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.0"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
