{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "0630fee5-c6e6-45b7-afd7-c4cfd6eb4d03",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-03-15T09:37:03.143200Z",
     "iopub.status.busy": "2022-03-15T09:37:03.142917Z",
     "iopub.status.idle": "2022-03-15T09:37:04.940433Z",
     "shell.execute_reply": "2022-03-15T09:37:04.939798Z",
     "shell.execute_reply.started": "2022-03-15T09:37:03.143179Z"
    },
    "scrolled": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Looking in indexes: https://pypi.tuna.tsinghua.edu.cn/simple\n",
      "Requirement already satisfied: paddlenlp in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (2.2.4)\n",
      "Requirement already satisfied: h5py in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from paddlenlp) (2.9.0)\n",
      "Requirement already satisfied: colorama in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from paddlenlp) (0.4.4)\n",
      "Requirement already satisfied: multiprocess in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from paddlenlp) (0.70.11.1)\n",
      "Requirement already satisfied: jieba in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from paddlenlp) (0.42.1)\n",
      "Requirement already satisfied: seqeval in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from paddlenlp) (1.2.2)\n",
      "Requirement already satisfied: colorlog in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from paddlenlp) (4.1.0)\n",
      "Requirement already satisfied: numpy>=1.7 in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from h5py->paddlenlp) (1.19.5)\n",
      "Requirement already satisfied: six in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from h5py->paddlenlp) (1.16.0)\n",
      "Requirement already satisfied: dill>=0.3.3 in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from multiprocess->paddlenlp) (0.3.3)\n",
      "Requirement already satisfied: scikit-learn>=0.21.3 in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from seqeval->paddlenlp) (0.24.2)\n",
      "Requirement already satisfied: joblib>=0.11 in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from scikit-learn>=0.21.3->seqeval->paddlenlp) (0.14.1)\n",
      "Requirement already satisfied: scipy>=0.19.1 in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from scikit-learn>=0.21.3->seqeval->paddlenlp) (1.6.3)\n",
      "Requirement already satisfied: threadpoolctl>=2.0.0 in /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages (from scikit-learn>=0.21.3->seqeval->paddlenlp) (2.1.0)\n",
      "\u001B[33mWARNING: You are using pip version 21.3.1; however, version 22.0.4 is available.\n",
      "You should consider upgrading via the '/opt/conda/envs/python35-paddle120-env/bin/python -m pip install --upgrade pip' command.\u001B[0m\n"
     ]
    }
   ],
   "source": [
    "# 下载paddlenlp\n",
    "!pip install -U paddlenlp"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7c7616ba-099c-4b01-947a-e953bb0655f6",
   "metadata": {
    "pycharm": {
     "name": "#%% md\n"
    }
   },
   "source": [
    "# 配置文件"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "5108981a-dbf9-4e87-bf54-0d221216feaa",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-03-15T09:37:04.942973Z",
     "iopub.status.busy": "2022-03-15T09:37:04.942722Z",
     "iopub.status.idle": "2022-03-15T09:37:07.014671Z",
     "shell.execute_reply": "2022-03-15T09:37:07.014079Z",
     "shell.execute_reply.started": "2022-03-15T09:37:04.942949Z"
    },
    "scrolled": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlenlp/transformers/funnel/modeling.py:30: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\n",
      "  from collections import Iterable\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "from tqdm import tqdm\n",
    "\n",
    "from collections import defaultdict\n",
    "import paddle\n",
    "from paddlenlp.transformers import AutoTokenizer\n",
    "from paddle import optimizer, nn\n",
    "from paddlenlp.transformers import AutoModelForSequenceClassification,ErnieForSequenceClassification\n",
    "from extra import  extra_fgm\n",
    "from paddlenlp.transformers import LinearDecayWithWarmup\n",
    "from paddle.io import Dataset, DataLoader\n",
    "import pandas as pd\n",
    "from collections import defaultdict\n",
    "from sklearn.utils import shuffle\n",
    "from sklearn.model_selection import train_test_split\n",
    "\n",
    "\n",
    "class Config:\n",
    "    # 数据加载部分\n",
    "    dataset = 'paws-x'\n",
    "    max_seq_len = 64  # 句子长度\n",
    "    need_data_aug = True\n",
    "    operation = 'Train' # 训练/预测\n",
    "    # 模型部分\n",
    "    model_path = 'D:/env/bert_model/hfl/chinese-bert-wwm-ext'  # 本地模型路径\n",
    "    model_suffix = '-kflod' # 模型后缀\n",
    "    tokenizer = None  # tokenizer对象\n",
    "    load_model = False  # 是否加载已有模型预测\n",
    "    save_model = True  # 是否保存训练好的模型\n",
    "    # 训练部分\n",
    "    device = 'cpu'\n",
    "    learning_rate = 5e-6\n",
    "    batch_size = 512  # batch大小\n",
    "    epochs = 20  # 训练次数\n",
    "    print_loss = 20  # 打印loss次数\n",
    "    num_labels = 2  # 分类数\n",
    "    adv = 'fgm' # 对抗训练方式\n",
    "    eps = 0.1 # 干扰因子\n",
    "    # 其他参数\n",
    "    random_seed = 2022 # 随机种子\n",
    "    k_flod = 5 # 交叉训练"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "de30fe93-d0fc-48b4-becf-31de00ad87fe",
   "metadata": {
    "pycharm": {
     "name": "#%% md\n"
    }
   },
   "source": [
    "# 解压并读取数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "c91a9670-85e8-4df1-9fbb-73d6fc044369",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-03-15T09:37:07.015905Z",
     "iopub.status.busy": "2022-03-15T09:37:07.015654Z",
     "iopub.status.idle": "2022-03-15T09:37:07.021617Z",
     "shell.execute_reply": "2022-03-15T09:37:07.021164Z",
     "shell.execute_reply.started": "2022-03-15T09:37:07.015882Z"
    },
    "scrolled": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "\n",
    "# 数据增强\n",
    "def aug_group_by_a(df):\n",
    "    aug_data = defaultdict(list)\n",
    "    # 以text_a中的句子为 a\n",
    "    for g, data in df.groupby(by=['text_a']):\n",
    "        if len(data) < 2:\n",
    "            continue\n",
    "        for i in range(len(data)):\n",
    "            for j in range(i + 1, len(data)):\n",
    "                # 取出b的值，a,b的label\n",
    "                row_i_text = data.iloc[i, 1]\n",
    "                row_i_label = data.iloc[i, 2]\n",
    "\n",
    "                # 取出c的值，a,c的label\n",
    "                row_j_text = data.iloc[j, 1]\n",
    "                row_j_label = data.iloc[j, 2]\n",
    "\n",
    "                if row_i_label == row_j_label == 0:\n",
    "                    continue\n",
    "\n",
    "                aug_label = 1 if row_i_label == row_j_label == 1 else 0\n",
    "\n",
    "                aug_data['text_a'].append(row_i_text)\n",
    "                aug_data['text_b'].append(row_j_text)\n",
    "                aug_data['label'].append(aug_label)\n",
    "    return pd.DataFrame(aug_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "10221a68-ff95-4cda-867a-edd9259a0c6a",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-03-15T09:37:07.022543Z",
     "iopub.status.busy": "2022-03-15T09:37:07.022315Z",
     "iopub.status.idle": "2022-03-15T09:37:07.029295Z",
     "shell.execute_reply": "2022-03-15T09:37:07.028877Z",
     "shell.execute_reply.started": "2022-03-15T09:37:07.022524Z"
    },
    "scrolled": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "# 裁剪或填充\n",
    "def pad(text, max_seq_len, pad_str):\n",
    "    new_text = [pad_str for i in range(max_seq_len)]\n",
    "    if len(text) < max_seq_len:\n",
    "        new_text[:len(text)] = text\n",
    "        return ''.join(new_text)\n",
    "    else:\n",
    "        return ''.join(text[:max_seq_len])\n",
    "# 去除空置\n",
    "def drop_na(df):\n",
    "    if len(set(df['label'])) > 2:\n",
    "        df = df[df['label'].isin(['0', '1'])]\n",
    "        df['label'] = df['label'].astype('int')\n",
    "    df = df.dropna()\n",
    "    return df\n",
    "# 转换数据\n",
    "def data_encoder(df, tokenizer):\n",
    "    inputs = defaultdict(list)\n",
    "    for i, row in tqdm(df.iterrows(), desc='encode data', total=len(df)):\n",
    "        seq_a = row[0]\n",
    "        seq_b = row[1]\n",
    "        label = row[2]\n",
    "        inputs_dict = tokenizer.encode(seq_a, seq_b, return_special_tokens_mask=True, return_token_type_ids=True,\n",
    "                                                        return_attention_mask=True, max_seq_len=config.max_seq_len, pad_to_max_seq_len=True)\n",
    "        inputs['input_ids'].append(inputs_dict['input_ids'])\n",
    "        inputs['token_type_ids'].append(inputs_dict['token_type_ids'])\n",
    "        inputs['attention_mask'].append(inputs_dict['attention_mask'])\n",
    "        inputs['labels'].append(label)\n",
    "    return inputs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "5253ee1a-006a-4b5b-ba4b-4932892b6b1b",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-03-15T09:37:07.030229Z",
     "iopub.status.busy": "2022-03-15T09:37:07.030083Z",
     "iopub.status.idle": "2022-03-15T09:37:07.044312Z",
     "shell.execute_reply": "2022-03-15T09:37:07.043882Z",
     "shell.execute_reply.started": "2022-03-15T09:37:07.030211Z"
    },
    "scrolled": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "# 读取数据\n",
    "def read_data(config: Config):\n",
    "    if config.operation == 'train':\n",
    "        train = pd.read_csv('data/data52714/' + config.dataset + '/train.tsv', sep='\\t',\n",
    "                            names=['text_a', 'text_b', 'label'])\n",
    "        dev = pd.read_csv('data/data52714/' + config.dataset + '/dev.tsv', sep='\\t',\n",
    "                          names=['text_a', 'text_b', 'label'])\n",
    "\n",
    "        test_size = len(dev) / (len(train)+len(dev))\n",
    "\n",
    "        if len(set(train['label'])) > 2:\n",
    "            train = train[train['label'].isin(['0', '1'])]\n",
    "            train['label'] = train['label'].astype('int')\n",
    "        train = train.dropna()\n",
    "\n",
    "        if len(set(train['label'])) > 2:\n",
    "            dev = dev[dev['label'].isin(['0', '1'])]\n",
    "            dev['label'] = dev['label'].astype('int')\n",
    "        dev = dev.dropna()\n",
    "\n",
    "        # 最终返回的数据\n",
    "        data = pd.concat([train, dev])\n",
    "\n",
    "        # 数据增强，加大训练集数据量\n",
    "        if config.need_data_aug is True:\n",
    "            aug_train = aug_group_by_a(train)\n",
    "            aug_dev = aug_group_by_a(dev)\n",
    "            # 拼接数据\n",
    "            data = pd.concat([data, aug_train, aug_dev])\n",
    "        # 随机切分数据\n",
    "        X = data[['text_a', 'text_b']]\n",
    "        y = data['label']\n",
    "        X_train, X_dev, y_train, y_dev = train_test_split(\n",
    "            X, y, random_state=config.random_seed, test_size=test_size)\n",
    "        X_train['label'] = y_train\n",
    "        X_dev['label'] = y_dev\n",
    "        # tokenizer\n",
    "        tokenizer = config.tokenizer\n",
    "        data_df = {'train': X_train, 'dev': X_dev}\n",
    "        full_data_dict = {}\n",
    "        for k, df in data_df.items():\n",
    "            inputs = defaultdict(list)\n",
    "            for i, row in tqdm(df.iterrows(), desc='encode {} data'.format(k), total=len(df)):\n",
    "                seq_a = row[0]\n",
    "                seq_b = row[1]\n",
    "                label = row[2]\n",
    "                inputs_dict = tokenizer.encode(seq_a, seq_b, return_special_tokens_mask=True,\n",
    "                                               return_token_type_ids=True,\n",
    "                                               return_attention_mask=True, max_seq_len=config.max_seq_len,\n",
    "                                               pad_to_max_seq_len=True)\n",
    "                inputs['input_ids'].append(inputs_dict['input_ids'])\n",
    "                inputs['token_type_ids'].append(inputs_dict['token_type_ids'])\n",
    "                inputs['attention_mask'].append(inputs_dict['attention_mask'])\n",
    "                inputs['labels'].append(label)\n",
    "            full_data_dict[k] = inputs\n",
    "        return full_data_dict['train'], full_data_dict['dev']\n",
    "    elif config.operation == 'predict':\n",
    "        test = pd.read_csv('data/data52714/' + config.dataset + '/test.tsv', sep='\\t', names=['text_a', 'text_b'])\n",
    "        test['label'] = 0\n",
    "        # tokenizer\n",
    "        tokenizer = config.tokenizer\n",
    "        data_df = {'test': test}\n",
    "        full_data_dict = {}\n",
    "        for k, df in data_df.items():\n",
    "            inputs = defaultdict(list)\n",
    "            for i, row in tqdm(df.iterrows(), desc='encode {} data'.format(k), total=len(df)):\n",
    "                seq_a = row[0]\n",
    "                seq_b = row[1]\n",
    "                label = row[2]\n",
    "                inputs_dict = tokenizer.encode(seq_a, seq_b, return_special_tokens_mask=True,\n",
    "                                               return_token_type_ids=True,\n",
    "                                               return_attention_mask=True, max_seq_len=config.max_seq_len,\n",
    "                                               pad_to_max_seq_len=True)\n",
    "                inputs['input_ids'].append(inputs_dict['input_ids'])\n",
    "                inputs['token_type_ids'].append(inputs_dict['token_type_ids'])\n",
    "                inputs['attention_mask'].append(inputs_dict['attention_mask'])\n",
    "                inputs['labels'].append(label)\n",
    "            full_data_dict[k] = inputs\n",
    "        return full_data_dict['test'], len(test)\n",
    "    else:\n",
    "        raise Exception('错误的模型行为!')\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "79e88c60-9bf3-4177-b7e8-d3c57f269602",
   "metadata": {
    "pycharm": {
     "name": "#%% md\n"
    }
   },
   "source": [
    "# 转换数据至dataset和dataloader"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "6d8128bb-5cf8-4208-99d8-12bcb61cb3d2",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-03-15T09:37:07.045323Z",
     "iopub.status.busy": "2022-03-15T09:37:07.044947Z",
     "iopub.status.idle": "2022-03-15T09:37:07.052724Z",
     "shell.execute_reply": "2022-03-15T09:37:07.052308Z",
     "shell.execute_reply.started": "2022-03-15T09:37:07.045303Z"
    },
    "scrolled": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "\n",
    "class SimDataset(Dataset):\n",
    "    def __init__(self, data_dict):\n",
    "        super(SimDataset, self).__init__()\n",
    "        self.input_ids = data_dict['input_ids']\n",
    "        self.token_type_ids = data_dict['token_type_ids']\n",
    "        self.attention_mask = data_dict['attention_mask']\n",
    "        self.labels = data_dict['labels']\n",
    "        self.len = len(self.input_ids)\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        data = (self.input_ids[index],\n",
    "                self.token_type_ids[index],\n",
    "                self.attention_mask[index],\n",
    "                self.labels[index])\n",
    "        return data\n",
    "\n",
    "    def __len__(self):\n",
    "        return self.len\n",
    "\n",
    "class Collator:\n",
    "\n",
    "    def __init__(self, tokenizer):\n",
    "        self.tokenizer = tokenizer\n",
    "\n",
    "    def to_tensor(self, input_ids_list, token_type_ids_list, attention_mask_list, labels_list):\n",
    "        input_ids = paddle.to_tensor([input_id for input_id in input_ids_list], dtype='int64')\n",
    "        token_type_ids = paddle.to_tensor([token_type_id for token_type_id in token_type_ids_list], dtype='int64')\n",
    "        attention_masks = paddle.to_tensor([attention_mask for attention_mask in attention_mask_list], dtype='int64')\n",
    "        labels = paddle.to_tensor([[label]for label in labels_list], dtype='int64')\n",
    "        return input_ids, token_type_ids, attention_masks, labels\n",
    "\n",
    "    def __call__(self, examples):\n",
    "        # 获取数据\n",
    "        input_ids_list, token_type_ids_list, attention_mask_list, labels_list = list(zip(*examples))\n",
    "        # 转张量\n",
    "        input_ids, token_type_ids, attention_mask, labels = self.to_tensor(input_ids_list, token_type_ids_list, attention_mask_list, labels_list)\n",
    "        # 返回结果\n",
    "        data = {\n",
    "            'input_ids': input_ids,\n",
    "            'token_type_ids': token_type_ids,\n",
    "            'attention_mask': attention_mask,\n",
    "            'labels': labels,\n",
    "        }\n",
    "        return data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "6543662e-524a-4967-9baa-16be31c6f167",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-03-15T09:37:07.053620Z",
     "iopub.status.busy": "2022-03-15T09:37:07.053349Z",
     "iopub.status.idle": "2022-03-15T09:37:07.058579Z",
     "shell.execute_reply": "2022-03-15T09:37:07.058160Z",
     "shell.execute_reply.started": "2022-03-15T09:37:07.053602Z"
    },
    "scrolled": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "# 创建dataloader\n",
    "def create_dataloader(config: Config):\n",
    "    if config.operation == 'train':\n",
    "        # 读取数据\n",
    "        train, dev = read_data(config)\n",
    "        # 构建dataset\n",
    "        train_dataset = SimDataset(train)\n",
    "        dev_dataset = SimDataset(dev)\n",
    "        \n",
    "        # 构建dataloader\n",
    "        collate_fn = Collator(config.tokenizer)\n",
    "        train_dataloader = DataLoader(train_dataset, batch_size=config.batch_size, collate_fn=collate_fn, shuffle=True,\n",
    "                                    num_workers=0)\n",
    "        dev_dataloader = DataLoader(dev_dataset, batch_size=config.batch_size, collate_fn=collate_fn, shuffle=True,\n",
    "                                    num_workers=0)\n",
    "        \n",
    "        return train_dataloader, dev_dataloader\n",
    "    elif config.operation == 'predict':\n",
    "        test, test_len = read_data(config)\n",
    "        test_dataset = SimDataset(test)\n",
    "        collate_fn = Collator(config.tokenizer)\n",
    "        test_dataloader = DataLoader(test_dataset, batch_size=config.batch_size, collate_fn=collate_fn, shuffle=False,\n",
    "                                    num_workers=0)\n",
    "        return test_dataloader, test_len\n",
    "    else:\n",
    "        raise Exception('错误的模型行为!')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5d362f20-7e9e-4187-be08-c99d3f50de97",
   "metadata": {
    "pycharm": {
     "name": "#%% md\n"
    }
   },
   "source": [
    "# 自定义模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "72cef2b6-4199-40e1-911e-e8c965b0f6af",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-03-15T09:37:07.059516Z",
     "iopub.status.busy": "2022-03-15T09:37:07.059366Z",
     "iopub.status.idle": "2022-03-15T09:37:07.071924Z",
     "shell.execute_reply": "2022-03-15T09:37:07.071469Z",
     "shell.execute_reply.started": "2022-03-15T09:37:07.059498Z"
    },
    "scrolled": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "from paddlenlp.transformers import ErnieGramModel,NeZhaModel\n",
    "\n",
    "\n",
    "class FCModel(nn.Layer):\n",
    "    def __init__(self, bert_model, ouput_num):\n",
    "        super().__init__()\n",
    "        self.lin1 = nn.Linear(\n",
    "            in_features=128,\n",
    "            out_features=64,\n",
    "            weight_attr=paddle.ParamAttr(\n",
    "                initializer=nn.initializer.TruncatedNormal(\n",
    "                    std=bert_model.config['initializer_range'])))\n",
    "\n",
    "        self.lin2 = nn.Linear(in_features=64, out_features=16)\n",
    "        self.lin3 = nn.Linear(in_features=16, out_features=ouput_num)\n",
    "        self.relu = nn.LeakyReLU()\n",
    "        self.drop = nn.Dropout(p=0.02)\n",
    "        self.softmax = nn.Softmax(axis=1)\n",
    "\n",
    "    def forward(self, sequence_output):\n",
    "\n",
    "        # 线性层1\n",
    "        x = self.lin1(sequence_output)\n",
    "        x = self.relu(x)\n",
    "        x = self.drop(x)\n",
    "        # 线性层2\n",
    "        x = self.lin2(x)\n",
    "        x = self.relu(x)\n",
    "        x = self.drop(x)\n",
    "        # 线性层3\n",
    "        logits = self.lin3(x)\n",
    "        return self.softmax(logits)\n",
    "\n",
    "\n",
    "from paddlenlp.transformers.ernie.modeling import ErniePooler\n",
    "class SimModel(nn.Layer):\n",
    "\n",
    "    def __init__(self, model_path, num_labels, max_seq_len):\n",
    "        super().__init__()\n",
    "        self.model_path = model_path\n",
    "        # 创建基础版的bert模型\n",
    "        self.bert = ErnieGramModel.from_pretrained(model_path)\n",
    "        # 创建RNN层\n",
    "        self.lstm = nn.LSTM(input_size=768,hidden_size=64,direction='bidirect')\n",
    "        self.gru = nn.GRU(input_size=128,hidden_size=64,direction='bidirect')\n",
    "        # bert_pooler\n",
    "        self.bert_pooler = ErniePooler(128)\n",
    "        # 创建卷积层\n",
    "        self.cnn1 = nn.Sequential(\n",
    "            nn.Conv1D(in_channels=max_seq_len, out_channels=16, kernel_size=2, stride=1),\n",
    "            nn.ReLU(),\n",
    "            nn.MaxPool1D(3)\n",
    "        )\n",
    "        self.cnn2 = nn.Sequential(\n",
    "            nn.Conv1D(in_channels=16, out_channels=8, kernel_size=2, stride=1),\n",
    "            nn.ReLU(),\n",
    "            nn.MaxPool1D(2)\n",
    "        )\n",
    "\n",
    "        # 创建线性推理层\n",
    "        self.classifier = FCModel(self.bert, num_labels)\n",
    "\n",
    "    def forward(self,\n",
    "                input_ids,\n",
    "                token_type_ids=None,\n",
    "                position_ids=None,\n",
    "                attention_mask=None):\n",
    "        # BERT推理\n",
    "        sequence_output, _ = self.bert(\n",
    "                input_ids,\n",
    "                token_type_ids=token_type_ids,\n",
    "                position_ids=position_ids,\n",
    "                attention_mask=attention_mask)\n",
    "        # ------------ RNN ------------ #\n",
    "        # 维度转换，RNN的输入是 [time_steps,batch_size,input_size]\n",
    "        sequence_output = paddle.transpose(sequence_output, [1,0,2])\n",
    "        # RNN\n",
    "        sequence_output, _ = self.lstm(sequence_output)\n",
    "        sequence_output, _ = self.gru(sequence_output)\n",
    "        # 维度转换，RNN的输出是 [time_steps, batch_size, num_directions * hidden_size]\n",
    "        sequence_output = paddle.transpose(sequence_output, [1,0,2])\n",
    "        # ------------ RNN ------------ #\n",
    "\n",
    "        lin_input = self.bert_pooler(sequence_output)\n",
    "\n",
    "        # # ------------ CNN ------------ #\n",
    "        # cnn_output = self.cnn1(sequence_output)\n",
    "        # cnn_output = self.cnn2(cnn_output)\n",
    "\n",
    "        # cnn_output = paddle.transpose(cnn_output, [0, 2, 1]) # 需要一个个channel的数据拼接，所以需要先转换维度\n",
    "        # lin_input = paddle.reshape(cnn_output, [paddle.shape(cnn_output)[0], -1]) # 展平，用于线性推理\n",
    "        # # ------------ CNN ------------ #\n",
    "\n",
    "        # 线性推理\n",
    "        logits = self.classifier(lin_input)\n",
    "        return logits"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "59f96837-5f84-4786-a802-dbe7c7815893",
   "metadata": {
    "pycharm": {
     "name": "#%% md\n"
    }
   },
   "source": [
    "# 定义训练、校验、预测"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "4c74e36b-b1b9-4108-9dd1-632c83bd4955",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-03-15T09:37:07.073002Z",
     "iopub.status.busy": "2022-03-15T09:37:07.072638Z",
     "iopub.status.idle": "2022-03-15T09:37:07.078555Z",
     "shell.execute_reply": "2022-03-15T09:37:07.078104Z",
     "shell.execute_reply.started": "2022-03-15T09:37:07.072982Z"
    },
    "scrolled": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "# 校验\n",
    "@paddle.no_grad()\n",
    "def evaluation(model, loss_fn, metric, val_dataloder):\n",
    "    accu = []\n",
    "    model.eval()\n",
    "    metric.reset()\n",
    "    losses = []\n",
    "    for iter_id, mini_batch in tqdm(enumerate(val_dataloder)):\n",
    "        input_ids = mini_batch['input_ids']\n",
    "        token_type_ids = mini_batch['token_type_ids']\n",
    "        attention_mask = mini_batch['attention_mask']\n",
    "        labels = mini_batch['labels']\n",
    "\n",
    "        logits = model(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)\n",
    "\n",
    "        loss = loss_fn(logits, labels)\n",
    "        losses.append(loss.numpy())\n",
    "        probs = logits\n",
    "        # probs = nn.functional.softmax(logits, axis=1)\n",
    "        correct = metric.compute(probs, labels)\n",
    "        metric.update(correct)\n",
    "        acc = metric.accumulate()\n",
    "        accu.append(acc)\n",
    "    \n",
    "    model.train()\n",
    "    metric.reset()\n",
    "\n",
    "    return np.mean(losses), np.mean(accu)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "307491fa-f440-4195-9dff-ae645cf79f0d",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-03-15T09:37:07.079580Z",
     "iopub.status.busy": "2022-03-15T09:37:07.079298Z",
     "iopub.status.idle": "2022-03-15T09:37:07.086354Z",
     "shell.execute_reply": "2022-03-15T09:37:07.085916Z",
     "shell.execute_reply.started": "2022-03-15T09:37:07.079561Z"
    },
    "scrolled": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "# 预测\n",
    "def predict(config: Config):\n",
    "    \"\"\"\n",
    "    预测函数\n",
    "    \"\"\"\n",
    "    test = pd.read_csv('data/data52714/' + config.dataset + '/test.tsv', sep='\\t', names=['text_a', 'text_b'])\n",
    "    results = np.zeros((len(test),2))\n",
    "    for k in range(config.k_flod):\n",
    "        # 加载模型\n",
    "        conf.tokenizer = AutoTokenizer.from_pretrained('./checkpoint/'+config.dataset+'/k_flod_head/'+str(k)+'/'+config.model_path)\n",
    "        model = SimModel(config.model_path, config.num_labels, config.max_seq_len)\n",
    "        load_layer_state_dict = paddle.load('./checkpoint/'+config.dataset+'/k_flod_head/'+str(k)+'/'+config.model_path+'/model.pdparams')\n",
    "        model.set_state_dict(load_layer_state_dict)\n",
    "        # model = AutoModelForSequenceClassification.from_pretrained('./checkpoint/'+conf.dataset+'/k_flod/'+conf.model_path+'_'+str(k))\n",
    "        # 读取数据\n",
    "        test_dataloader, test_len = create_dataloader(config)\n",
    "        k_result = []\n",
    "        model.eval()\n",
    "        for iter_id, mini_batch in tqdm(enumerate(test_dataloader)):\n",
    "            input_ids = mini_batch['input_ids']\n",
    "            token_type_ids = mini_batch['token_type_ids']\n",
    "            attention_mask = mini_batch['attention_mask']\n",
    "            \n",
    "            logits = model(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)\n",
    "            # probs = nn.functional.softmax(logits, axis=1)\n",
    "            probs = logits\n",
    "            k_result.append(probs.numpy())\n",
    "\n",
    "        results += np.vstack(k_result)\n",
    "    results /= 5\n",
    "    return np.argmax(results, axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "56d4411c-7355-4b04-84e8-22407dd63ac5",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-03-15T09:37:07.088331Z",
     "iopub.status.busy": "2022-03-15T09:37:07.088164Z",
     "iopub.status.idle": "2022-03-15T09:37:07.100894Z",
     "shell.execute_reply": "2022-03-15T09:37:07.100401Z",
     "shell.execute_reply.started": "2022-03-15T09:37:07.088312Z"
    },
    "scrolled": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "import time \n",
    "import datetime\n",
    "# 训练\n",
    "def train(config: Config):\n",
    "    # 多折交叉训练\n",
    "    for k in range(config.k_flod):\n",
    "        k += 3\n",
    "        # 读取数据\n",
    "        train_dataloader, dev_dataloader = create_dataloader(conf)\n",
    "\n",
    "        # 创建模型\n",
    "        # model = AutoModelForSequenceClassification.from_pretrained(config.model_path,num_classes=config.num_labels)\n",
    "        model = SimModel(config.model_path, config.num_labels, config.max_seq_len)\n",
    "        # 定义优化器\n",
    "        num_training_steps = len(train_dataloader) * config.epochs\n",
    "        lr_scheduler = LinearDecayWithWarmup(config.learning_rate, num_training_steps, 0.1)\n",
    "        decay_params = [\n",
    "            p.name for n, p in model.named_parameters()\n",
    "            if not any(nd in n for nd in [\"bias\", \"norm\"])\n",
    "        ]\n",
    "        opt = optimizer.AdamW(learning_rate=lr_scheduler,\n",
    "                            parameters=model.parameters(),\n",
    "                            weight_decay=0.01,\n",
    "                            apply_decay_param_fun=lambda x: x in decay_params)\n",
    "        # 定义损失函数\n",
    "        loss_fn = nn.loss.CrossEntropyLoss()\n",
    "        metric = paddle.metric.Accuracy()\n",
    "        # 检测是否添加对抗训练\n",
    "        if conf.adv == 'fgm':\n",
    "            adver_method = extra_fgm.FGM(model)\n",
    "\n",
    "        best_acc = 0\n",
    "\n",
    "        # 遍历训练次数训练\n",
    "        for epoch in range(config.epochs):\n",
    "            starttime = datetime.datetime.now()\n",
    "            model.train()\n",
    "            for iter_id, mini_batch in enumerate(train_dataloader):\n",
    "                input_ids = mini_batch['input_ids']\n",
    "                token_type_ids = mini_batch['token_type_ids']\n",
    "                attention_mask = mini_batch['attention_mask']\n",
    "                labels = mini_batch['labels']\n",
    "\n",
    "                logits = model(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)\n",
    "                # 计算损失值\n",
    "                loss = loss_fn(logits, labels)\n",
    "                # 计算具体值并校验\n",
    "                # probs = paddle.nn.functional.softmax(logits, axis=1)\n",
    "                probs = logits\n",
    "                correct = metric.compute(probs, labels)\n",
    "                metric.update(correct)\n",
    "                acc = metric.accumulate()\n",
    "\n",
    "                loss.backward()\n",
    "                # 检测是否使用对抗训练\n",
    "                if conf.adv == 'fgm':\n",
    "                    # Apply the FGM adversarial perturbation r to the embedding\n",
    "                    adver_method.attack(epsilon=conf.eps)\n",
    "                    # 计算x+r的前向loss\n",
    "                    logits_adv = model(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)\n",
    "                    loss_adv = loss_fn(logits_adv, labels)\n",
    "                    # 反向传播得到梯度，然后累加到(1)的梯度上；\n",
    "                    loss_adv.backward()\n",
    "                    # 将embedding恢复为（1）时的embedding；\n",
    "                    adver_method.restore()\n",
    "                # Optimizer step: update parameters with the accumulated gradients (backprop already done above)\n",
    "                opt.step()\n",
    "                lr_scheduler.step()\n",
    "                opt.clear_grad()\n",
    "                # 打印模型性能\n",
    "                if iter_id%config.print_loss == 0:\n",
    "                    print('k:{}, epoch:{}, iter_id:{}, loss:{}, acc:{}'.format(k, epoch, iter_id, loss, acc))\n",
    "            # After each epoch, evaluate on the validation set\n",
    "            avg_val_loss, avg_val_acc = evaluation(model, loss_fn, metric, dev_dataloader)\n",
    "            endtime = datetime.datetime.now()\n",
    "            print('-' * 50)\n",
    "            print('k:{}, epoch: {}, val_loss: {}, val_acc: {}, duration: {}'.format(k, epoch, avg_val_loss, avg_val_acc, (endtime - starttime).seconds))\n",
    "            print('-' * 50)\n",
    "\n",
    "        layer_state_dict = model.state_dict()\n",
    "        paddle.save(layer_state_dict, './checkpoint/'+config.dataset+'/k_flod_head/'+str(k)+'/'+config.model_path+'/model.pdparams')\n",
    "        config.tokenizer.save_pretrained('./checkpoint/'+config.dataset+'/k_flod_head/'+str(k)+'/'+config.model_path)\n",
    "\n",
    "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0120b362-c9ea-40b8-8fca-14acd1fec43c",
   "metadata": {
    "scrolled": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "4950c107-88c8-4bc9-be9b-15e7f50a898d",
   "metadata": {
    "pycharm": {
     "name": "#%% md\n"
    }
   },
   "source": [
    "# 运行模型"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "72c69bb4-10e5-4f56-85ea-9f277bca3ed9",
   "metadata": {
    "pycharm": {
     "name": "#%% md\n"
    }
   },
   "source": [
    "参数记录\n",
    "\n",
    "|数据集|max_seq_len|batch_size|learning_rate|\n",
    "|--|--|--|--|\n",
    "|paws-x|88x2 |128|1e-5|\n",
    "|lcqmc|22x2 |256|5e-6|\n",
    "|bq_corpus|30x2 |256|5e-6|"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "e7a37689-4a2b-477a-ba74-c9dc3d8c097b",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-03-15T09:37:07.101753Z",
     "iopub.status.busy": "2022-03-15T09:37:07.101600Z",
     "iopub.status.idle": "2022-03-15T09:38:25.386650Z",
     "shell.execute_reply": "2022-03-15T09:38:25.386083Z",
     "shell.execute_reply.started": "2022-03-15T09:37:07.101735Z"
    },
    "scrolled": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[2022-03-15 17:37:07,128] [    INFO] - Already cached /home/aistudio/.paddlenlp/models/ernie-gram-zh/ernie_gram_zh.pdparams\n",
      "W0315 17:37:07.131204 25765 device_context.cc:447] Please NOTE: device: 0, GPU Compute Capability: 7.0, Driver API Version: 10.1, Runtime API Version: 10.1\n",
      "W0315 17:37:07.134528 25765 device_context.cc:465] device: 0, cuDNN Version: 7.6.\n",
      "encode test data: 100%|██████████| 2000/2000 [00:01<00:00, 1371.93it/s]\n",
      "32it [00:06,  5.06it/s]\n",
      "[2022-03-15 17:37:25,982] [    INFO] - Already cached /home/aistudio/.paddlenlp/models/ernie-gram-zh/ernie_gram_zh.pdparams\n",
      "encode test data: 100%|██████████| 2000/2000 [00:01<00:00, 1393.72it/s]\n",
      "32it [00:06,  5.03it/s]\n",
      "[2022-03-15 17:37:43,241] [    INFO] - Already cached /home/aistudio/.paddlenlp/models/ernie-gram-zh/ernie_gram_zh.pdparams\n",
      "encode test data: 100%|██████████| 2000/2000 [00:01<00:00, 1294.00it/s]\n",
      "32it [00:06,  5.02it/s]\n",
      "[2022-03-15 17:37:58,826] [    INFO] - Already cached /home/aistudio/.paddlenlp/models/ernie-gram-zh/ernie_gram_zh.pdparams\n",
      "encode test data: 100%|██████████| 2000/2000 [00:01<00:00, 1224.81it/s]\n",
      "32it [00:06,  5.02it/s]\n",
      "[2022-03-15 17:38:11,933] [    INFO] - Already cached /home/aistudio/.paddlenlp/models/ernie-gram-zh/ernie_gram_zh.pdparams\n",
      "encode test data: 100%|██████████| 2000/2000 [00:01<00:00, 1138.31it/s]\n",
      "32it [00:06,  5.02it/s]\n"
     ]
    }
   ],
   "source": [
    "# Entry point: configure the run, then dispatch to training or prediction.\n",
    "paddle.device.set_device('gpu:0')\n",
    "\n",
    "conf = Config()\n",
    "conf.operation = 'predict'  # 'train' or 'predict'\n",
    "conf.model_path = 'ernie-gram-zh'\n",
    "conf.dataset = 'paws-x'\n",
    "conf.batch_size = 64\n",
    "conf.epochs = 8\n",
    "conf.max_seq_len = 88*2 # two sentences, 88 tokens each (see the parameter table above)\n",
    "conf.learning_rate = 1e-5\n",
    "conf.k_flod = 5  # NOTE(review): the 'flod' spelling must match Config and the 'k_flod_head' checkpoint paths used by train() — do not rename here alone\n",
    "\n",
    "# Seed after all config so conf.random_seed is set; presumably Config provides the default — verify\n",
    "paddle.seed(conf.random_seed)\n",
    "\n",
    "\n",
    "if conf.operation == 'train': # train the model\n",
    "    conf.tokenizer = AutoTokenizer.from_pretrained(conf.model_path)\n",
    "    # run k-fold training\n",
    "    model = train(conf)\n",
    "elif conf.operation == 'predict': # run inference\n",
    "    # predict labels for the test set\n",
    "    predict_labels = predict(conf)\n",
    "    # save results as an index/prediction TSV\n",
    "    test_df = pd.DataFrame(predict_labels, columns=[ 'prediction'])\n",
    "    test_df['index'] = test_df.index\n",
    "    test_df.to_csv('result/' + conf.dataset + '.tsv', index=False, columns=['index', 'prediction'], sep='\\t')\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "04599ae2-1413-4c12-afbb-87ca6a8e3f91",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-03-15T09:38:25.388220Z",
     "iopub.status.busy": "2022-03-15T09:38:25.387957Z",
     "iopub.status.idle": "2022-03-15T09:38:25.391222Z",
     "shell.execute_reply": "2022-03-15T09:38:25.390780Z",
     "shell.execute_reply.started": "2022-03-15T09:38:25.388197Z"
    },
    "scrolled": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "# NOTE(review): dead cell — fully commented-out legacy prediction flow using the old predict(model, dataloader) API, superseded by the predict(conf) call above. Consider deleting this cell.\n",
    "# # 预测结果\n",
    "# paddle.device.set_device('gpu:0')\n",
    "\n",
    "# conf = Config()\n",
    "# conf.operation = 'predict'\n",
    "# conf.model_path = 'ernie-gram-zh'\n",
    "# conf.dataset = 'paws-x'\n",
    "\n",
    "# # 加载模型\n",
    "# model = AutoModelForSequenceClassification.from_pretrained('./checkpoint/'+conf.dataset+'/'+conf.model_path+conf.model_suffix)\n",
    "# conf.tokenizer = AutoTokenizer.from_pretrained('./checkpoint/'+conf.dataset+'/'+conf.model_path+conf.model_suffix)\n",
    "# # 读取数据\n",
    "# test_dataloader = create_dataloader(conf)\n",
    "# # 预测\n",
    "# predict_labels = predict(model, test_dataloader)\n",
    "# # 保存结果\n",
    "# test_df = pd.DataFrame(predict_labels, columns=[ 'prediction'])\n",
    "# test_df['index'] = test_df.index\n",
    "# test_df.to_csv('result/' + conf.dataset + '.tsv', index=False, columns=['index', 'prediction'], sep='\\t')"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "py35-paddle1.2.0"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}