{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/siyuan/miniconda3/envs/search/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n",
      "/home/siyuan/miniconda3/envs/search/lib/python3.10/site-packages/_distutils_hack/__init__.py:33: UserWarning: Setuptools is replacing distutils.\n",
      "  warnings.warn(\"Setuptools is replacing distutils.\")\n"
     ]
    }
   ],
   "source": [
    "import argparse\n",
    "import os\n",
    "import random\n",
    "import time\n",
    "from functools import partial\n",
    "\n",
    "import numpy as np\n",
    "import paddle\n",
    "import pandas as pd\n",
    "from data import convert_pairwise_example\n",
    "from model import PairwiseMatching\n",
    "from tqdm import tqdm\n",
    "\n",
    "from paddlenlp.data import Pad, Stack, Tuple\n",
    "from paddlenlp.datasets import load_dataset\n",
    "from paddlenlp.transformers import AutoModel, AutoTokenizer, LinearDecayWithWarmup"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 下面加载训练集和验证集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def read_train(data_path): \n",
    "    with open(data_path, 'r', encoding='utf-8') as f: \n",
    "        flag = 0\n",
    "        for line in f: \n",
    "            if flag != 0: \n",
    "                data = line.rstrip().split(\"\\t\") \n",
    "                if len(data) != 3: \n",
    "                    continue\n",
    "                yield {'query': data[0], 'title': data[1], 'neg_title': data[2]} \n",
    "                                                                                   \n",
    "            flag = 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def read_dev(data_path): \n",
    "    with open(data_path, 'r', encoding='utf-8') as f: \n",
    "        flag = 0\n",
    "        for line in f: \n",
    "            if flag != 0: \n",
    "                data = line.rstrip().split(\"\\t\") \n",
    "                if len(data) != 3: \n",
    "                    continue\n",
    "                yield {'query': data[0], 'title': data[1], 'label': data[2]} \n",
    "                                                                               \n",
    "            flag = 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_file = \"rank_dataset/train_pairwise.csv\"\n",
    "train_ds = load_dataset(read_train, data_path=train_file, lazy=False) \n",
    "\n",
    "dev_file = \"rank_dataset/dev_pairwise.csv\"\n",
    "dev_ds = load_dataset(read_dev, data_path=dev_file, lazy=False) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'query': '英语委婉语引起的跨文化交际障碍', 'title': '英语委婉语引起的跨文化交际障碍及其翻译策略研究英语委婉语,跨文化交际障碍,翻译策略', 'neg_title': '委婉语在英语和汉语中的文化差异委婉语,文化,跨文化交际'}\n",
      "{'query': '范迪慧 嘉兴市中医院', 'title': '滋阴疏肝汤联合八穴隔姜灸治疗肾虚肝郁型卵巢功能低下的临床疗效滋阴疏肝汤,八穴隔姜灸,肾虚肝郁型卵巢功能低下,性脉甾类激素,妊娠', 'neg_title': '温针灸、中药薰蒸在半月板损伤术后康复中的疗效分析膝损伤,半月板,胫骨,中医康复,温针疗法,薰洗'}\n",
      "{'query': '灰色关联分析', 'title': '灰色关联分析评价不同产地金果榄质量金果榄;灰色关联分析法;主成分分析法;盐酸巴马汀;盐酸药根碱', 'neg_title': '江西省某三级甲等医院2型糖尿病患者次均住院费用新灰色关联分析2型糖尿病,次均住院费用,新灰色关联分析,结构变动度'}\n"
     ]
    }
   ],
   "source": [
    "# 输出训练数据集的前3条数据看一下\n",
    "for i in range(3):\n",
    "    print(train_ds[i])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'query': '作者单位:南州中学', 'title': '浅谈初中教学管理如何体现人文关怀初中教育,教学管理,人文关怀', 'label': '1'}\n",
      "{'query': '作者单位:南州中学', 'title': '高中美术课堂教学中藏区本土民间艺术的融入路径藏区,传统民间艺术,美术课堂', 'label': '0'}\n",
      "{'query': '作者单位:南州中学', 'title': '列宁关于资产阶级民主革命向 社会主义革命过渡的理论列宁,直接过渡,间接过渡,资产阶级民主革命,社会主义革命', 'label': '0'}\n"
     ]
    }
   ],
   "source": [
    "# 输出验证数据集的前3条数据看一下\n",
    "for i in range(3):\n",
    "    print(dev_ds[i])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001b[32m[2024-01-28 10:04:51,604] [    INFO]\u001b[0m - We are using (<class 'paddlenlp.transformers.ernie.tokenizer.ErnieTokenizer'>, False) to load 'ernie-3.0-medium-zh'.\u001b[0m\n",
      "\u001b[32m[2024-01-28 10:04:51,605] [    INFO]\u001b[0m - Already cached /home/siyuan/.paddlenlp/models/ernie-3.0-medium-zh/ernie_3.0_medium_zh_vocab.txt\u001b[0m\n",
      "\u001b[32m[2024-01-28 10:04:51,630] [    INFO]\u001b[0m - tokenizer config file saved in /home/siyuan/.paddlenlp/models/ernie-3.0-medium-zh/tokenizer_config.json\u001b[0m\n",
      "\u001b[32m[2024-01-28 10:04:51,631] [    INFO]\u001b[0m - Special tokens file saved in /home/siyuan/.paddlenlp/models/ernie-3.0-medium-zh/special_tokens_map.json\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "model_name = \"ernie-3.0-medium-zh\"\n",
    "\n",
    "tokenizer = AutoTokenizer.from_pretrained(model_name) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "trans_func_train = partial(convert_pairwise_example, tokenizer=tokenizer, max_seq_length=128, phase=\"train\")\n",
    "\n",
    "trans_func_eval = partial(convert_pairwise_example, tokenizer=tokenizer, max_seq_length=128, phase=\"eval\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'query': '英语委婉语引起的跨文化交际障碍', 'title': '英语委婉语引起的跨文化交际障碍及其翻译策略研究英语委婉语,跨文化交际障碍,翻译策略', 'neg_title': '委婉语在英语和汉语中的文化差异委婉语,文化,跨文化交际'}\n",
      "英语委婉语引起的跨文化交际障碍\n",
      "英语委婉语引起的跨文化交际障碍及其翻译策略研究英语委婉语,跨文化交际障碍,翻译策略\n",
      "委婉语在英语和汉语中的文化差异委婉语,文化,跨文化交际\n",
      "---------------------------------------------\n",
      "{'input_ids': [1, 514, 405, 298, 2681, 405, 477, 200, 5, 1465, 68, 73, 276, 430, 843, 1767, 2, 514, 405, 298, 2681, 405, 477, 200, 5, 1465, 68, 73, 276, 430, 843, 1767, 107, 63, 1197, 1285, 584, 750, 166, 229, 514, 405, 298, 2681, 405, 30, 1465, 68, 73, 276, 430, 843, 1767, 30, 1197, 1285, 584, 750, 2], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}\n",
      "{'input_ids': [1, 514, 405, 298, 2681, 405, 477, 200, 5, 1465, 68, 73, 276, 430, 843, 1767, 2, 298, 2681, 405, 11, 514, 405, 14, 657, 405, 12, 5, 68, 73, 859, 712, 298, 2681, 405, 30, 68, 73, 30, 1465, 68, 73, 276, 430, 2], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}\n",
      "---------------------------------------------\n",
      "[1, 514, 405, 298, 2681, 405, 477, 200, 5, 1465, 68, 73, 276, 430, 843, 1767, 2, 514, 405, 298, 2681, 405, 477, 200, 5, 1465, 68, 73, 276, 430, 843, 1767, 107, 63, 1197, 1285, 584, 750, 166, 229, 514, 405, 298, 2681, 405, 30, 1465, 68, 73, 276, 430, 843, 1767, 30, 1197, 1285, 584, 750, 2]\n",
      "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n",
      "---------------------------------------------\n",
      "[1, 514, 405, 298, 2681, 405, 477, 200, 5, 1465, 68, 73, 276, 430, 843, 1767, 2, 298, 2681, 405, 11, 514, 405, 14, 657, 405, 12, 5, 68, 73, 859, 712, 298, 2681, 405, 30, 68, 73, 30, 1465, 68, 73, 276, 430, 2]\n",
      "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n",
      "---------------------------------------------\n",
      "[[1, 514, 405, 298, 2681, 405, 477, 200, 5, 1465, 68, 73, 276, 430, 843, 1767, 2, 514, 405, 298, 2681, 405, 477, 200, 5, 1465, 68, 73, 276, 430, 843, 1767, 107, 63, 1197, 1285, 584, 750, 166, 229, 514, 405, 298, 2681, 405, 30, 1465, 68, 73, 276, 430, 843, 1767, 30, 1197, 1285, 584, 750, 2], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 514, 405, 298, 2681, 405, 477, 200, 5, 1465, 68, 73, 276, 430, 843, 1767, 2, 298, 2681, 405, 11, 514, 405, 14, 657, 405, 12, 5, 68, 73, 859, 712, 298, 2681, 405, 30, 68, 73, 30, 1465, 68, 73, 276, 430, 2], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/siyuan/miniconda3/envs/search/lib/python3.10/site-packages/paddlenlp/transformers/tokenizer_utils_base.py:2353: FutureWarning: The `max_seq_len` argument is deprecated and will be removed in a future version, please use `max_length` instead.\n",
      "  warnings.warn(\n",
      "/home/siyuan/miniconda3/envs/search/lib/python3.10/site-packages/paddlenlp/transformers/tokenizer_utils_base.py:1925: UserWarning: Truncation was not explicitly activated but `max_length` is provided a specific value, please use `truncation=True` to explicitly truncate examples to max length. Defaulting to 'longest_first' truncation strategy. If you encode pairs of sequences (GLUE-style) with the tokenizer you can select this strategy more precisely by providing a specific strategy to `truncation`.\n",
      "  warnings.warn(\n"
     ]
    }
   ],
   "source": [
    "#演示\n",
    "example = train_ds[0]\n",
    "print(example)\n",
    "\n",
    "query, pos_title, neg_title = example[\"query\"], example[\"title\"], example[\"neg_title\"]\n",
    "print(query)\n",
    "print(pos_title)\n",
    "print(neg_title)\n",
    "print('---------------------------------------------')\n",
    "\n",
    "pos_inputs = tokenizer(text=query, text_pair=pos_title, max_seq_len=128) \n",
    "neg_inputs = tokenizer(text=query, text_pair=neg_title, max_seq_len=128) \n",
    "\n",
    "print(pos_inputs)\n",
    "print(neg_inputs)\n",
    "print('---------------------------------------------')\n",
    "\n",
    "pos_input_ids = pos_inputs[\"input_ids\"] \n",
    "pos_token_type_ids = pos_inputs[\"token_type_ids\"] \n",
    "\n",
    "print(pos_input_ids)\n",
    "print(pos_token_type_ids)\n",
    "print('---------------------------------------------')\n",
    "\n",
    "neg_input_ids = neg_inputs[\"input_ids\"] \n",
    "neg_token_type_ids = neg_inputs[\"token_type_ids\"] \n",
    "print(neg_input_ids)\n",
    "print(neg_token_type_ids)\n",
    "print('---------------------------------------------')\n",
    "\n",
    "result = [pos_input_ids, pos_token_type_ids, neg_input_ids, neg_token_type_ids]\n",
    "print(result)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "def batchify_fn_train(samples): \n",
    "    fn = Tuple(\n",
    "        Pad(axis=0, pad_val=tokenizer.pad_token_id, dtype=\"int64\"),  \n",
    "        Pad(axis=0, pad_val=tokenizer.pad_token_type_id, dtype=\"int64\"),  \n",
    "        Pad(axis=0, pad_val=tokenizer.pad_token_id, dtype=\"int64\"),  \n",
    "        Pad(axis=0, pad_val=tokenizer.pad_token_type_id, dtype=\"int64\"),  \n",
    "    )\n",
    "\n",
    "    processed_samples = fn(samples) \n",
    "\n",
    "    result = []\n",
    "    for data in processed_samples:\n",
    "        result.append(data) \n",
    "\n",
    "    return result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "def batchify_fn_eval(samples): \n",
    "    fn = Tuple(\n",
    "        Pad(axis=0, pad_val=tokenizer.pad_token_id, dtype=\"int64\"),  \n",
    "        Pad(axis=0, pad_val=tokenizer.pad_token_type_id, dtype=\"int64\"),  \n",
    "        Stack(dtype=\"int64\"), \n",
    "    )\n",
    "\n",
    "    processed_samples = fn(samples) \n",
    "\n",
    "    result = []\n",
    "    for data in processed_samples:\n",
    "        result.append(data) \n",
    "\n",
    "    return result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "batch_sampler_train = paddle.io.DistributedBatchSampler(train_ds, batch_size=32, shuffle=True)\n",
    "\n",
    "train_data_loader = paddle.io.DataLoader(dataset=train_ds.map(trans_func_train), batch_sampler=batch_sampler_train, collate_fn=batchify_fn_train, return_list=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "batch_sampler_dev = paddle.io.BatchSampler(dev_ds, batch_size=32, shuffle=False)\n",
    "\n",
    "dev_data_loader = paddle.io.DataLoader(dataset=dev_ds.map(trans_func_eval), batch_sampler=batch_sampler_dev, collate_fn=batchify_fn_eval, return_list=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 下面搭建模型，并开始训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001b[32m[2024-01-28 10:04:57,488] [    INFO]\u001b[0m - We are using <class 'paddlenlp.transformers.ernie.modeling.ErnieModel'> to load 'ernie-3.0-medium-zh'.\u001b[0m\n",
      "\u001b[32m[2024-01-28 10:04:57,490] [    INFO]\u001b[0m - Already cached /home/siyuan/.paddlenlp/models/ernie-3.0-medium-zh/model_state.pdparams\u001b[0m\n",
      "\u001b[32m[2024-01-28 10:04:57,491] [    INFO]\u001b[0m - Loading weights file model_state.pdparams from cache at /home/siyuan/.paddlenlp/models/ernie-3.0-medium-zh/model_state.pdparams\u001b[0m\n",
      "\u001b[32m[2024-01-28 10:04:57,774] [    INFO]\u001b[0m - Loaded weights file from disk, setting weights to model.\u001b[0m\n",
      "\u001b[33m[2024-01-28 10:05:02,189] [ WARNING]\u001b[0m - Some weights of the model checkpoint at ernie-3.0-medium-zh were not used when initializing ErnieModel: ['ernie.encoder.layers.6.linear1.bias', 'ernie.encoder.layers.6.linear1.weight', 'ernie.encoder.layers.6.linear2.bias', 'ernie.encoder.layers.6.linear2.weight', 'ernie.encoder.layers.6.norm1.bias', 'ernie.encoder.layers.6.norm1.weight', 'ernie.encoder.layers.6.norm2.bias', 'ernie.encoder.layers.6.norm2.weight', 'ernie.encoder.layers.6.self_attn.k_proj.bias', 'ernie.encoder.layers.6.self_attn.k_proj.weight', 'ernie.encoder.layers.6.self_attn.out_proj.bias', 'ernie.encoder.layers.6.self_attn.out_proj.weight', 'ernie.encoder.layers.6.self_attn.q_proj.bias', 'ernie.encoder.layers.6.self_attn.q_proj.weight', 'ernie.encoder.layers.6.self_attn.v_proj.bias', 'ernie.encoder.layers.6.self_attn.v_proj.weight']\n",
      "- This IS expected if you are initializing ErnieModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing ErnieModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\u001b[0m\n",
      "\u001b[33m[2024-01-28 10:05:02,193] [ WARNING]\u001b[0m - Some weights of ErnieModel were not initialized from the model checkpoint at ernie-3.0-medium-zh and are newly initialized: ['ernie.pooler.dense.bias', 'ernie.pooler.dense.weight']\n",
      "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "pretrained_model = AutoModel.from_pretrained(model_name) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "model = PairwiseMatching(pretrained_model, margin=0.1) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "epochs = 3 \n",
    "\n",
    "num_training_steps = len(train_data_loader) * epochs \n",
    "\n",
    "lr_scheduler = LinearDecayWithWarmup(2E-5, num_training_steps, 0.0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "decay_params = [\n",
    "    p.name for n, p in model.named_parameters() \n",
    "    if not any(nd in n for nd in [\"bias\", \"norm\"])\n",
    "]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "optimizer = paddle.optimizer.AdamW(\n",
    "    learning_rate=lr_scheduler,\n",
    "    parameters=model.parameters(),\n",
    "    weight_decay=0.0,\n",
    "    apply_decay_param_fun=lambda x: x in decay_params)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "metric = paddle.metric.Auc() "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "@paddle.no_grad() \n",
    "def evaluate(model, metric, data_loader): \n",
    "    model.eval() \n",
    "    metric.reset()\n",
    " \n",
    "    for idx, batch in enumerate(data_loader): \n",
    "        input_ids, token_type_ids, labels = batch\n",
    "        pos_probs = model.predict(input_ids=input_ids, token_type_ids=token_type_ids) \n",
    "        neg_probs = 1.0 - pos_probs \n",
    "        preds = np.concatenate((neg_probs, pos_probs), axis=1) \n",
    "\n",
    "        metric.update(preds=preds, labels=labels) \n",
    "        auc = metric.accumulate() \n",
    "\n",
    "    print(\"phase: dev, auc: {:.3}\".format(auc)) \n",
    "    metric.reset()\n",
    "    model.train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 下面正式开始训练模型\n",
    "\n",
    "save_dir=\"model_param\"\n",
    "global_step = 0\n",
    "tic_train = time.time()\n",
    "\n",
    "for epoch in range(1, epochs + 1):\n",
    "    for step, batch in enumerate(train_data_loader, start=1): \n",
    "        pos_input_ids, pos_token_type_ids, neg_input_ids, neg_token_type_ids = batch\n",
    "\n",
    "        loss = model(\n",
    "            pos_input_ids=pos_input_ids,\n",
    "            neg_input_ids=neg_input_ids,\n",
    "            pos_token_type_ids=pos_token_type_ids,\n",
    "            neg_token_type_ids=neg_token_type_ids,\n",
    "        ) \n",
    "\n",
    "        global_step += 1\n",
    "\n",
    "        if global_step % 10 == 0: \n",
    "            print(\"global step %d, epoch: %d, batch: %d, loss: %.5f, speed: %.2f step/s\"\n",
    "                % (global_step, epoch, step, loss, 10 / (time.time() - tic_train)))\n",
    "            tic_train = time.time()\n",
    "\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "        lr_scheduler.step()\n",
    "        optimizer.clear_grad()\n",
    "\n",
    "        if global_step % 100 == 0:\n",
    "            evaluate(model, metric, dev_data_loader)\n",
    "            \n",
    "            save_path = os.path.join(save_dir, \"model_%d\" % global_step) \n",
    "            if not os.path.exists(save_path):\n",
    "                os.makedirs(save_path)\n",
    "\n",
    "            save_param_path = os.path.join(save_path, \"model_state.pdparams\") \n",
    "            paddle.save(model.state_dict(), save_param_path)\n",
    "            tokenizer.save_pretrained(save_path) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/siyuan/miniconda3/envs/search/lib/python3.10/site-packages/paddlenlp/transformers/tokenizer_utils_base.py:2353: FutureWarning: The `max_seq_len` argument is deprecated and will be removed in a future version, please use `max_length` instead.\n",
      "  warnings.warn(\n",
      "/home/siyuan/miniconda3/envs/search/lib/python3.10/site-packages/paddlenlp/transformers/tokenizer_utils_base.py:3049: UserWarning: Be aware, overflowing tokens are not returned for the setting you have chosen, i.e. sequence pairs with the 'longest_first' truncation strategy. So the returned list will always be empty even if some tokens have been removed.\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "phase: dev, auc: 0.773\n"
     ]
    }
   ],
   "source": [
    "# 下面是把保存好的模型参数文件加载到一个新的模型中，并进行性能测试的过程\n",
    "\n",
    "model_2 = PairwiseMatching(pretrained_model, margin=0.1)\n",
    "\n",
    "params_path = \"model_param/model_400/model_state.pdparams\"\n",
    "\n",
    "state_dict = paddle.load(params_path)\n",
    "model_2.set_dict(state_dict)\n",
    "\n",
    "evaluate(model_2, metric, dev_data_loader)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "search",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
