{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 一、函数及数据准备"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 1、调库"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "import re\n",
    "import torch\n",
    "import torch.nn.functional as F\n",
    "import torch.nn as nn"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.metrics import accuracy_score\n",
    "from sklearn.metrics import precision_score\n",
    "from sklearn.metrics import recall_score\n",
    "from sklearn.metrics import f1_score"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 3、导入数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): hardcoded absolute Windows path - consider a configurable DATA_DIR\n",
    "CSV_FILE_PATH_TRAIN = 'D:/alltrain/train.csv'   # training-set CSV\n",
    "dfTrain = pd.read_csv(CSV_FILE_PATH_TRAIN)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 4、清洗数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "dfTrain.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "#正则匹配，清除非汉字部分\n",
    "def findChinese(sentence):\n",
    "    pattern = re.compile(r'[^\\u4e00-\\u9fa5]')\n",
    "    sentence = re.sub(pattern, '', sentence)\n",
    "    return sentence"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "question_beforeClean = np.array(dfTrain.iloc[:, dfTrain.shape[1] - 1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "question_afterClean = np.array([findChinese(x) for x in question_beforeClean])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 二、建立字典"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 1、统计所有不同的字"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def getDifferentWord(data):\n",
    "    dic_key = []\n",
    "    for question in data:\n",
    "        for tip in list(question):\n",
    "            if tip not in dic_key:\n",
    "                dic_key.append(tip)\n",
    "    return dic_key"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "dic_key = getDifferentWord(question_afterClean)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "dic_key[:15]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 2、 建立字典"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "bin()  #十进制转二进制函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Assign each distinct character a 1-based integer id; 0 stays free as the\n",
    "# padding value of the fixed-width matrix built later.\n",
    "# (a bin() conversion could be applied here if binary ids are wanted)\n",
    "dic = {key: idx for idx, key in enumerate(dic_key, start=1)}"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##  三、建立训练矩阵（将问句转为二进制向量）"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 1、找到最大维度"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def findDimension(data):\n",
    "    dimension = np.max([len(list(x)) for x in data])\n",
    "    return dimension"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "dimension = findDimension(question_afterClean)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "dimension"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 2、生成训练集矩阵"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "X = np.zeros((len(question_afterClean), dimension))    #训练集x\n",
    "X.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def getValues(sentence, dic):\n",
    "    result = []\n",
    "    for word in list(sentence):\n",
    "        result.append(dic.get(word))\n",
    "    return np.array(result)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Left-align each sentence's character ids into row i; trailing positions\n",
    "# keep the 0 padding from np.zeros.\n",
    "for i, sentence in enumerate(question_afterClean):\n",
    "    X[i, :len(sentence)] = getValues(sentence, dic)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "y = np.array(dfTrain.iloc[:,1:7])\n",
    "y.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "X[:10,:20]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 3、分割数据   比例4000 ：1000  随机种子666"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.model_selection import train_test_split\n",
    "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 1000, random_state = 666)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "np.sum(y_train[:,5])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "sum(y_train)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 四、训练数据   （使用pytorch）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if torch.cuda.is_available():\n",
    "    device = torch.device(\"cuda\")          # a CUDA device object"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 1、转换数据格式"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def changeType(X_data, y_data):  #将array数据转化为longTensor\n",
    "    return torch.from_numpy(X_data).float(), torch.from_numpy(y_data).float()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "X_torch_train, y_torch_train = changeType(X_train, y_train)\n",
    "X_torch_test, y_torch_test = changeType(X_test, y_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "X_torch_train.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "y_torch_train.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "X_torch_train = X_torch_train.reshape(X_torch_train.shape[0],1, X_train.shape[1])\n",
    "X_torch_test = X_torch_test.reshape(X_torch_test.shape[0],1, X_test.shape[1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "X_torch_train.shape"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 2、创建并训练神经网络"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class CNN(nn.Module):\n",
    "    \"\"\"Two 1-D conv blocks followed by one fully connected layer -> 6 logits.\n",
    "\n",
    "    NOTE(review): shape comments below assume an input of (batch, 1, 1622);\n",
    "    the hard-coded Linear in-features (20 * 404) only match that length.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        super(CNN, self).__init__()\n",
    "        self.conv1 = nn.Sequential(  # input shape (batch, 1, 1622)\n",
    "            nn.Conv1d(\n",
    "                in_channels=1,      # single input channel (one id sequence)\n",
    "                out_channels=10,    # n_filters\n",
    "                kernel_size=3,      # filter size\n",
    "                stride=1,           # filter movement/step\n",
    "            ),      # output shape (batch, 10, 1620)\n",
    "            nn.ReLU(),    # activation\n",
    "            nn.MaxPool1d(kernel_size=2),    # downsample by 2 -> (batch, 10, 810)\n",
    "        )\n",
    "        self.conv2 = nn.Sequential(  # input shape (batch, 10, 810)\n",
    "            nn.Conv1d(10, 20, 3, 1),  # output shape (batch, 20, 808)\n",
    "            nn.ReLU(),  # activation\n",
    "            nn.MaxPool1d(2),  # output shape (batch, 20, 404)\n",
    "        )\n",
    "        self.out = nn.Linear(20 * 1 * 404, 6)   # fully connected layer -> 6 logits\n",
    "        \n",
    "\n",
    "\n",
    "    def forward(self, x):\n",
    "        \"\"\"Run both conv blocks, flatten, and return raw logits (no sigmoid).\"\"\"\n",
    "        x = self.conv1(x)\n",
    "        x = self.conv2(x)\n",
    "        x = x.view(x.size(0), -1)   # flatten conv maps to (batch, 20 * 404)\n",
    "        output = self.out(x)\n",
    "        return output\n",
    "\n",
    "cnn = CNN()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "net=CNN()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "net"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "net = net.cuda()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "optimizer = torch.optim.SGD(net.parameters(), lr=0.02)  # 传入 net 的所有参数, 学习率\n",
    "loss_func = torch.nn.BCEWithLogitsLoss()#BCEWithLogitsLoss()针对多标签分类 整合sigmoid，输出的为概率\n",
    "loss_func = loss_func.cuda()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "X_torch_train, y_torch_train = X_torch_train.cuda(), y_torch_train.cuda()   #转为gpu"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "acc= [[],[],[],[],[],[]]\n",
    "lossSet= []"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "%%time\n",
    "for t in range(3000):\n",
    "    out = net(X_torch_train)              # forward pass over the whole training set\n",
    "    loss = loss_func(out, y_torch_train)  # BCE-with-logits loss on the 6 labels\n",
    "    optimizer.zero_grad()                 # clear gradients from the previous step\n",
    "    loss.backward()                       # backpropagate\n",
    "    optimizer.step()                      # apply the SGD update\n",
    "    if t % 2 == 0:                        # record metrics every other epoch\n",
    "        # .detach() replaces the legacy .data access; same stored value\n",
    "        lossSet.append(loss.detach().cpu().numpy())\n",
    "        # torch.sigmoid replaces the deprecated F.sigmoid\n",
    "        prediction = torch.sigmoid(out).cpu() > 0.5\n",
    "        predictRs = prediction.int()\n",
    "        for i in range(len(acc)):\n",
    "            acc[i].append(accuracy_score(y_train[:, i], predictRs[:, i]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "sum(y_train)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "sum(predictRs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def getS(loss, acc):\n",
    "    \"\"\"Dump the loss curve and each label's accuracy curve to text files.\n",
    "\n",
    "    Writes CNNLoss.txt plus CNNAcc1.txt ... CNNAccN.txt (one file per label\n",
    "    curve in `acc`), one value per line.\n",
    "    \"\"\"\n",
    "    with open('CNNLoss.txt', 'w') as f:\n",
    "        for value in loss:\n",
    "            print(value, file=f)\n",
    "    # One loop per label curve instead of six copy-pasted blocks; also\n",
    "    # generalizes to any number of labels.\n",
    "    for label_idx, curve in enumerate(acc, start=1):\n",
    "        with open('CNNAcc%d.txt' % label_idx, 'w') as f:\n",
    "            for value in curve:\n",
    "                print(value, file=f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "getS(lossSet, acc)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "net = net.cpu()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "s = torch.sigmoid(net(X_torch_test)) > 0.5"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "scrolled": true
   },
   "source": [
    "y_predict = s.data.numpy().astype(int)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "scrolled": true
   },
   "source": [
    "precisionSet = []\n",
    "for i in range(6):\n",
    "    precisionSet.append(accuracy_score(y_train[:,i], y_predict[:,i]))\n",
    "print(precisionSet)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "precisionSet = []\n",
    "for i in range(6):\n",
    "    precisionSet.append(precision_score(y_train[:,i], y_predict[:,i]))\n",
    "print(precisionSet)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "precisionSet = []\n",
    "for i in range(6):\n",
    "    precisionSet.append(f1_score(y_train[:,i], y_predict[:,i]))\n",
    "print(precisionSet)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "\n",
    "recallSet = []\n",
    "for i in range(6):\n",
    "    recallSet.append(recall_score(y_train[:,i], y_predict[:,i]))\n",
    "recallSet"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 3、预测结果"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "sum(predictRs[:,4])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def predict(X_test):\n",
    "    \"\"\"Return an int32 array of 0/1 predictions (sigmoid threshold 0.5).\"\"\"\n",
    "    with torch.no_grad():   # inference only - skip autograd bookkeeping\n",
    "        test_out = net(X_test)\n",
    "    # torch.sigmoid replaces the deprecated F.sigmoid\n",
    "    predictRs = torch.sigmoid(test_out).numpy() > 0.5\n",
    "    return predictRs.astype(np.int32)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "X_torch_train, y_torch_train = X_torch_train.cpu(), y_torch_train.cpu()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "y_pred = predict(X_torch_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "loss.cpu()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "y_pred.shape"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 五、获取指标   准确率   精准率   召回率   F1-score"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 1、准确率"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.metrics import accuracy_score\n",
    "from sklearn.metrics import precision_score\n",
    "from sklearn.metrics import recall_score\n",
    "from sklearn.metrics import f1_score"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "accuracySet = []\n",
    "for i in range(6):\n",
    "    #normalize = False  默认为True  如果为False  则显示匹配数量\n",
    "    accuracySet.append(accuracy_score(y_test[:,i], y_pred[:,i]))\n",
    "accuracySet"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 2、精确率"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.metrics import precision_score"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "precisionSet = []\n",
    "for i in range(6):\n",
    "    precisionSet.append(precision_score(y_test[:,i], y_pred[:,i]))\n",
    "print(precisionSet)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "def precision(y_test, y_pred):\n",
    "    P = 0\n",
    "    TP = 0\n",
    "    for i in range(len(y_test)):\n",
    "        if (y_pred[i] == 1):\n",
    "            P += 1\n",
    "            if (y_test[i] == 1):\n",
    "                TP += 1\n",
    "    print(P, TP)\n",
    "    return TP / P"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "precision(y_train[:,i], y_pred[:,i])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 3、召回率"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.metrics import recall_score\n",
    "recallSet = []\n",
    "for i in range(6):\n",
    "    recallSet.append(recall_score(y_test[:,i], y_pred[:,i]))\n",
    "recallSet"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "def recall(y_test, y_pred):\n",
    "    P = 0\n",
    "    TP = 0\n",
    "    for i in range(len(y_test)):\n",
    "        if (y_test[i] == 1):\n",
    "            P += 1\n",
    "            if (y_pred[i] == 1):\n",
    "                TP += 1\n",
    "    print(P, TP)\n",
    "    return TP / P"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "recall(y_train[:,i], y_pred[:,i])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 4、F1-score"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.metrics import f1_score"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "f1Set = []\n",
    "for i in range(6):\n",
    "    f1Set.append(f1_score(y_test[:,i], y_pred[:,i]))\n",
    "f1Set"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def getF1Score(y_test, y_pred):\n",
    "    totalF1 = 0\n",
    "    for i in range(y_test.shape[1]):\n",
    "        totalF1 += f1_score(y_test[:,i], y_pred[:,i])\n",
    "        print(f1_score(y_test[:,i], y_pred[:,i]))\n",
    "    return totalF1 / y_test.shape[1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "getF1Score(y_test, y_pred)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "sum(y_pred[:,0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "len(lossSet)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "axis = [ x*2  for x in range(0,1500)]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "plt.plot(axis,lossSet)\n",
    "plt.text(1250, 0.36, 'loss' , fontdict={'size': 20, 'color': 'red'})\n",
    "plt.xlabel('epoch')\n",
    "plt.ylabel('loss')\n",
    "#plt.ylim(0.32,0.38)\n",
    "plt.xlim(0,2000)\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "lossSet"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "plt.plot(axis,acc[0])\n",
    "plt.text(1100, 0.62, 'acc of label1' , fontdict={'size': 20, 'color': 'red'})\n",
    "plt.ylim(0.6,0.67)\n",
    "plt.xlabel('epoch')\n",
    "plt.ylabel('acc')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "plt.plot(axis,acc[1])\n",
    "plt.text(1000, 0.645, 'acc of label2' , fontdict={'size': 20, 'color': 'red'})\n",
    "plt.xlabel('epoch')\n",
    "plt.ylim(0.63,0.68)\n",
    "plt.ylabel('acc')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "plt.plot(axis,acc[2])\n",
    "plt.text(250, 0.7, 'acc of label3' , fontdict={'size': 20, 'color': 'red'})\n",
    "plt.xlabel('epoch')\n",
    "plt.ylabel('acc')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "plt.plot(axis,acc[3])\n",
    "plt.text(1000, 0.84, 'acc of label4' , fontdict={'size': 20, 'color': 'red'})\n",
    "plt.ylim(0.82,0.875)\n",
    "plt.xlabel('epoch')\n",
    "plt.ylabel('acc')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "plt.plot(axis,acc[4])\n",
    "plt.text(1000, 0.895, 'acc of label5' , fontdict={'size': 20, 'color': 'red'})\n",
    "plt.ylim(0.89,0.91)\n",
    "plt.xlabel('epoch')\n",
    "plt.ylabel('acc')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "plt.plot(axis,acc[5])\n",
    "plt.text(1000, 0.955, 'acc of label6' , fontdict={'size': 20, 'color': 'red'})\n",
    "plt.ylim(0.95,0.965)\n",
    "plt.xlabel('epoch')\n",
    "plt.ylabel('acc')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "name_list = ['label1','label2','label3','label4','label5','label6']  \n",
    "plt.bar(range(len(name_list)), precisionSet,color='rgb',tick_label=name_list)  \n",
    "plt.xlabel('label')\n",
    "plt.ylabel('precision')\n",
    "plt.show()  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "name_list = ['label1','label2','label3','label4','label5','label6']  \n",
    "plt.bar(range(len(name_list)), recallSet,color='rgb',tick_label=name_list)  \n",
    "plt.xlabel('label')\n",
    "plt.ylabel('recall')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "name_list = ['label1','label2','label3','label4','label5','label6']  \n",
    "plt.bar(range(len(name_list)), f1Set,color='rgb',tick_label=name_list)  \n",
    "plt.xlabel('label')\n",
    "plt.ylabel('f1')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
