{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-11T07:13:06.707179300Z",
     "start_time": "2025-02-11T07:13:06.677547Z"
    }
   },
   "outputs": [],
   "source": [
     "# This notebook fine-tunes a neural network built on the pretrained ERNIE 1.0 model using the in-batch-negative training method."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-11T07:13:11.883676600Z",
     "start_time": "2025-02-11T07:13:06.677547Z"
    }
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\17362\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n",
      "C:\\Users\\17362\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\_distutils_hack\\__init__.py:33: UserWarning: Setuptools is replacing distutils.\n",
      "  warnings.warn(\"Setuptools is replacing distutils.\")\n"
     ]
    }
   ],
   "source": [
    "import abc\n",
    "import sys\n",
    "import argparse\n",
    "import os\n",
    "import random\n",
    "import time\n",
    "import numpy as np\n",
    "from scipy import stats\n",
    "import pandas as pd\n",
    "from tqdm import tqdm \n",
    "from scipy.special import softmax\n",
    "from scipy.special import expit\n",
    "import paddle\n",
    "import paddle.nn as nn\n",
    "import paddle.nn.functional as F\n",
    "from paddle import inference\n",
    "import paddlenlp\n",
    "from paddlenlp.data import Stack, Tuple, Pad\n",
    "from paddlenlp.datasets import load_dataset, MapDataset\n",
    "from paddlenlp.transformers import LinearDecayWithWarmup\n",
    "from paddlenlp.utils.downloader import get_path_from_url\n",
    "from visualdl import LogWriter\n",
    "from data import convert_pairwise_example"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-11T07:13:11.895346900Z",
     "start_time": "2025-02-11T07:13:11.883676600Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": "Place(cpu)"
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Run all model training and inference on the CPU instead of a GPU.\n",
     "paddle.set_device(\"cpu\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-11T07:13:11.895346900Z",
     "start_time": "2025-02-11T07:13:11.888229Z"
    }
   },
   "outputs": [],
   "source": [
    "def read_text_pair(data_path): # 读取路径为data_path的文件的内容\n",
    "  with open(data_path, 'r', encoding='utf-8') as f: # r只读,f表示文件(包含只读和编码方式)\n",
    "        for line in f: # 读取文件中的每一行, line即每一行的内容\n",
    "            data = line.rstrip().split(\"\\t\") # rstrip()移除字符串末尾的空格或换行符,split(\"\\t\")按照Tab分割字符串line, 生成一个列表data\n",
    "            if len(data) != 2: # 检查读取的这一行是否包含 2 个部分, 确保数据格式的正确, 这两部分语义相似 \n",
    "                continue\n",
    "            # 字典类型, yield是一个类似return的关键字\n",
    "            # yield 把 read_text_pair函数变成一个生成器generator\n",
    "            # 作用是 把 每个 for循环 返回 一个 字典数据\n",
    "            yield {'text_a': data[0], 'text_b': data[1]} \n",
    "                                                         \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-11T07:13:11.922296300Z",
     "start_time": "2025-02-11T07:13:11.895346900Z"
    }
   },
   "outputs": [],
   "source": [
     "# Fine-tuning data: each line of the file pairs a query with a document\n",
     "# title + keywords that are semantically related to it.\n",
     "train_set_path='recall_dataset/train.csv'  # location of the training set\n",
     "# lazy=False loads everything eagerly and yields a MapDataset (indexable);\n",
     "# lazy=True would instead load examples on demand.\n",
     "train_ds = load_dataset(read_text_pair, data_path=train_set_path, lazy=False) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-11T07:13:11.956334900Z",
     "start_time": "2025-02-11T07:13:11.906054400Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'paddlenlp.datasets.dataset.MapDataset'>\n"
     ]
    }
   ],
   "source": [
     "# Confirm lazy=False produced a MapDataset (supports indexing and len()).\n",
     "print(type(train_ds))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-11T07:13:11.956334900Z",
     "start_time": "2025-02-11T07:13:11.911105200Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'text_a': '从《唐律疏义》看唐代封爵贵族的法律特权', 'text_b': '从《唐律疏义》看唐代封爵贵族的法律特权《唐律疏义》,封爵贵族,法律特权'}\n",
      "{'text_a': '宁夏社区图书馆服务体系布局现状分析', 'text_b': '宁夏社区图书馆服务体系布局现状分析社区图书馆,社区图书馆服务,社区图书馆服务体系'}\n",
      "{'text_a': '人口老龄化对京津冀经济', 'text_b': '京津冀人口老龄化对区域经济增长的影响京津冀,人口老龄化,区域经济增长,固定效应模型'}\n"
     ]
    }
   ],
   "source": [
     "\n",
     "# Peek at the first three training pairs.\n",
     "for i in range(3):\n",
     "    print(train_ds[i])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-11T07:13:11.956334900Z",
     "start_time": "2025-02-11T07:13:11.917033500Z"
    }
   },
   "outputs": [],
   "source": [
     "# Next: build the loader for the training data."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-11T07:13:11.956334900Z",
     "start_time": "2025-02-11T07:13:11.917033500Z"
    }
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001B[32m[2025-02-11 15:13:11,917] [    INFO]\u001B[0m - Already cached C:\\Users\\17362\\.paddlenlp\\models\\ernie-1.0\\vocab.txt\u001B[0m\n",
      "\u001B[32m[2025-02-11 15:13:11,922] [    INFO]\u001B[0m - tokenizer config file saved in C:\\Users\\17362\\.paddlenlp\\models\\ernie-1.0\\tokenizer_config.json\u001B[0m\n",
      "\u001B[32m[2025-02-11 15:13:11,922] [    INFO]\u001B[0m - Special tokens file saved in C:\\Users\\17362\\.paddlenlp\\models\\ernie-1.0\\special_tokens_map.json\u001B[0m\n"
     ]
    }
   ],
   "source": [
     "# ERNIE 1.0: a Chinese pretrained transformer from PaddlePaddle, similar to\n",
     "# BERT but generally stronger on Chinese tasks.\n",
     "MODEL_NAME=\"ernie-1.0\"\n",
     "# ErnieTokenizer.from_pretrained downloads (or reads from the local cache)\n",
     "# the vocabulary and tokenizer configuration for the named model.\n",
     "#\n",
     "# The tokenizer converts a sentence into a sequence of integer token ids via\n",
     "# its built-in vocabulary, since the model cannot consume raw text directly.\n",
     "# E.g. a sentence may be segmented into tokens, and each token is mapped to\n",
     "# its index in the vocabulary.\n",
     "tokenizer = paddlenlp.transformers.ErnieTokenizer.from_pretrained(MODEL_NAME)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-11T07:13:11.956334900Z",
     "start_time": "2025-02-11T07:13:11.930461700Z"
    }
   },
   "outputs": [],
   "source": [
    "# 下面这个函数 就是 对 数据集中的数据 进行处理的函数\n",
    "# example是之前加载好的train_ds的一条数据\n",
    "# tokenizer即刚刚设置的分词器\n",
    "# max_seq_length即输入数据的最大长度\n",
    "def convert_example(example, tokenizer, max_seq_length=512): \n",
    "    result = [] # 定义一个空列表, 用于存储接下来要生成的数据\n",
    "    # 遍历example中的每个数据项中的 键 和 值\n",
    "    for key, text in example.items(): \n",
    "        # 利用分词器对text进行处理\n",
    "        # max_seq_len 这个指的是 text句子 中 最大的token数量\n",
    "        # token 就是利用 分词器 对 句子 进行 切分 产生的分词\n",
    "        # 比如 \"人口老龄化对京津冀经济\", 分词器 就可能 把它 切分成 \"人口  老龄化 京津冀 经济\"4个token\n",
    "        # 序列太长, 会被阶段; 序列太短,并不会被填充\n",
    "        encoded_inputs = tokenizer(text=text, max_seq_len=max_seq_length)\n",
    "        input_ids = encoded_inputs[\"input_ids\"] # 分词对应的索引组成的向量\n",
    "        # 分词的类型 组成的 向量\n",
    "        token_type_ids = encoded_inputs[\"token_type_ids\"] \n",
    "        result += [input_ids, token_type_ids]\n",
    "    # [(text_a)input_ids, (text_a)token_type_ids, (text_b)input_ids, (text_b)token_type_ids]\n",
    "    return result "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-11T07:13:11.956334900Z",
     "start_time": "2025-02-11T07:13:11.934000500Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "从《唐律疏义》看唐代封爵贵族的法律特权\n",
      "{'input_ids': [1, 158, 56, 867, 646, 1500, 393, 55, 335, 867, 140, 898, 2153, 864, 495, 5, 72, 646, 169, 438, 2], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}\n",
      "[1, 158, 56, 867, 646, 1500, 393, 55, 335, 867, 140, 898, 2153, 864, 495, 5, 72, 646, 169, 438, 2]\n",
      "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "------------------------------------\n",
      "从《唐律疏义》看唐代封爵贵族的法律特权《唐律疏义》,封爵贵族,法律特权\n",
      "{'input_ids': [1, 158, 56, 867, 646, 1500, 393, 55, 335, 867, 140, 898, 2153, 864, 495, 5, 72, 646, 169, 438, 56, 867, 646, 1500, 393, 55, 30, 898, 2153, 864, 495, 30, 72, 646, 169, 438, 2], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}\n",
      "[1, 158, 56, 867, 646, 1500, 393, 55, 335, 867, 140, 898, 2153, 864, 495, 5, 72, 646, 169, 438, 56, 867, 646, 1500, 393, 55, 30, 898, 2153, 864, 495, 30, 72, 646, 169, 438, 2]\n",
      "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "------------------------------------\n",
      "[[1, 158, 56, 867, 646, 1500, 393, 55, 335, 867, 140, 898, 2153, 864, 495, 5, 72, 646, 169, 438, 2], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 158, 56, 867, 646, 1500, 393, 55, 335, 867, 140, 898, 2153, 864, 495, 5, 72, 646, 169, 438, 56, 867, 646, 1500, 393, 55, 30, 898, 2153, 864, 495, 30, 72, 646, 169, 438, 2], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\17362\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\paddlenlp\\transformers\\tokenizer_utils_base.py:2293: FutureWarning: The `max_seq_len` argument is deprecated and will be removed in a future version, please use `max_length` instead.\n",
      "  warnings.warn(\n",
      "C:\\Users\\17362\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\paddlenlp\\transformers\\tokenizer_utils_base.py:1865: UserWarning: Truncation was not explicitly activated but `max_length` is provided a specific value, please use `truncation=True` to explicitly truncate examples to max length. Defaulting to 'longest_first' truncation strategy. If you encode pairs of sequences (GLUE-style) with the tokenizer you can select this strategy more precisely by providing a specific strategy to `truncation`.\n",
      "  warnings.warn(\n"
     ]
    }
   ],
   "source": [
    "#演示\n",
    "\n",
    "a=[]\n",
    "for key, text in train_ds[0].items():\n",
    "    print(text)\n",
    "    encoded_inputs = tokenizer(text=text, max_seq_len=512)\n",
    "    print(encoded_inputs)\n",
    "    print(encoded_inputs[\"input_ids\"]) \n",
    "    # 元素都是0, 说明属于同一个句子\n",
    "    print(encoded_inputs[\"token_type_ids\"])\n",
    "    a += [encoded_inputs[\"input_ids\"], encoded_inputs[\"token_type_ids\"]]\n",
    "    print('------------------------------------')\n",
    "    \n",
    "print(a)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-11T07:13:11.956334900Z",
     "start_time": "2025-02-11T07:13:11.940354800Z"
    }
   },
   "outputs": [],
   "source": [
     "# Pre-bind tokenizer and max_seq_length with functools.partial so the data\n",
     "# pipeline only needs to pass the example itself.\n",
     "from functools import partial\n",
     "# trans_func(ex) == convert_example(ex, tokenizer=tokenizer, max_seq_length=64)\n",
     "trans_func = partial(convert_example, tokenizer=tokenizer, max_seq_length=64) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-11T07:13:11.956334900Z",
     "start_time": "2025-02-11T07:13:11.943723300Z"
    }
   },
   "outputs": [],
   "source": [
    "# 对训练集中的多条数据进行处理\n",
    "def batchify_fn(samples):\n",
    "    # 我们训练数据会返回 text_a 和 text_b 的input_ids, token_type_ids共4个字段\n",
    "    # 因此针对这4个字段 需要 分别定义 4个操作\n",
    "    \n",
    "    # 在下面这段代码中, Pad是一个函数\n",
    "    # Tuple的作用是将4个Pad函数组合在一起, 所以fn也是一个函数\n",
    "    # 当对一批数据调用fn函数时, 会对这批数据总每个样本的 对应字段应用对应的Pad,Pad,Pad,Pad操作\n",
    "    fn = Tuple(\n",
    "        Pad(axis=0, pad_val=tokenizer.pad_token_id, dtype='int64'),   \n",
    "        Pad(axis=0, pad_val=tokenizer.pad_token_type_id, dtype='int64'),  \n",
    "        Pad(axis=0, pad_val=tokenizer.pad_token_id, dtype='int64'),  \n",
    "        Pad(axis=0, pad_val=tokenizer.pad_token_type_id, dtype='int64'),  \n",
    "    )\n",
    "\n",
    "    # processed_samples是对samples进行fn函数中Pad, Pad, Pad, Pad处理后产生的结果 \n",
    "    processed_samples = fn(samples) \n",
    "\n",
    "    result = []\n",
    "    for data in processed_samples:\n",
    "        result.append(data) \n",
    "\n",
    "    return result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-11T07:13:11.956334900Z",
     "start_time": "2025-02-11T07:13:11.947956600Z"
    }
   },
   "outputs": [],
   "source": [
     "# Build the training DataLoader.\n",
     "\n",
     "# DistributedBatchSampler shards batches across trainers in distributed runs\n",
     "# (with a single process it behaves like a regular batch sampler).\n",
     "# batch_size=64: samples per batch; shuffle=True: reshuffle every epoch.\n",
     "batch_sampler = paddle.io.DistributedBatchSampler(train_ds, batch_size=64, shuffle=True)\n",
     "\n",
     "# train_ds.map(trans_func) applies the tokenization function to every example\n",
     "# and uses the transformed dataset for training.\n",
     "# collate_fn=batchify_fn pads each field and merges samples into one batch.\n",
     "# return_list=True makes each batch come back as a list of arrays.\n",
     "train_data_loader = paddle.io.DataLoader(dataset=train_ds.map(trans_func), batch_sampler=batch_sampler, collate_fn=batchify_fn, return_list=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-11T07:13:11.956334900Z",
     "start_time": "2025-02-11T07:13:11.953721200Z"
    }
   },
   "outputs": [],
   "source": [
     "# Next: build the recall (retrieval) model."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-11T07:13:19.110763Z",
     "start_time": "2025-02-11T07:13:11.956334900Z"
    }
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001B[32m[2025-02-11 15:13:11,956] [    INFO]\u001B[0m - Already cached C:\\Users\\17362\\.paddlenlp\\models\\ernie-1.0\\model_state.pdparams\u001B[0m\n",
      "\u001B[32m[2025-02-11 15:13:11,956] [    INFO]\u001B[0m - Loading weights file model_state.pdparams from cache at C:\\Users\\17362\\.paddlenlp\\models\\ernie-1.0\\model_state.pdparams\u001B[0m\n",
      "\u001B[32m[2025-02-11 15:13:12,272] [    INFO]\u001B[0m - Loaded weights file from disk, setting weights to model.\u001B[0m\n",
      "\u001B[33m[2025-02-11 15:13:19,060] [ WARNING]\u001B[0m - Some weights of the model checkpoint at ernie-1.0 were not used when initializing ErnieModel: ['cls.predictions.layer_norm.bias', 'cls.predictions.transform.weight', 'cls.predictions.transform.bias', 'cls.predictions.layer_norm.weight', 'cls.predictions.decoder_bias']\n",
      "- This IS expected if you are initializing ErnieModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing ErnieModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\u001B[0m\n",
      "\u001B[32m[2025-02-11 15:13:19,060] [    INFO]\u001B[0m - All the weights of ErnieModel were initialized from the model checkpoint at ernie-1.0.\n",
      "If your task is similar to the task the model of the checkpoint was trained on, you can already use ErnieModel for predictions without further training.\u001B[0m\n"
     ]
    }
   ],
   "source": [
     "# Load the pretrained ERNIE-1.0 backbone (weights come from the local cache\n",
     "# or are downloaded on first use).\n",
     "pretrained_model = paddlenlp.transformers.ErnieModel.from_pretrained(MODEL_NAME)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-11T07:13:19.110763Z",
     "start_time": "2025-02-11T07:13:19.100831900Z"
    }
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\17362\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\paddle\\jit\\dy2static\\program_translator.py:712: UserWarning: full_graph=False don't support input_spec arguments. It will not produce any effect.\n",
      "You can set full_graph=True, then you can assign input spec.\n",
      "\n",
      "  warnings.warn(\n"
     ]
    }
   ],
   "source": [
     "from base_model import SemanticIndexBase\n",
     "\n",
     "# In-batch-negative training: within one batch, every other sample's title\n",
     "# serves as a negative for the current query, so no explicit negative\n",
     "# sampling is required.\n",
     "class SemanticIndexBatchNeg(SemanticIndexBase): # supervised dual-encoder recall model\n",
     "    def __init__(self, pretrained_model, dropout=None, margin=0.3, scale=30, output_emb_size=None):\n",
     "        super().__init__(pretrained_model, dropout, output_emb_size)\n",
     "\n",
     "        # margin: subtracted from positive-pair similarities (the diagonal),\n",
     "        # forcing positives to beat negatives by at least this gap.\n",
     "        self.margin = margin\n",
     "        # scale: sharpens the similarity distribution before cross-entropy.\n",
     "        self.scale = scale\n",
     "\n",
     "    def forward(self, query_input_ids,    title_input_ids,    query_token_type_ids=None, query_position_ids=None, query_attention_mask=None,    title_token_type_ids=None, title_position_ids=None, title_attention_mask=None):\n",
     "        \"\"\"Return the in-batch-negative loss for a batch of query/title pairs.\"\"\"\n",
     "        # Encode queries and titles into pooled sentence embeddings.\n",
     "        query_cls_embedding = self.get_pooled_embedding(query_input_ids, query_token_type_ids, query_position_ids, query_attention_mask) \n",
     "\n",
     "        title_cls_embedding = self.get_pooled_embedding(title_input_ids, title_token_type_ids, title_position_ids, title_attention_mask)    \n",
     "        \n",
     "        # [batch, batch] similarity matrix: row i = query i vs every title.\n",
     "        # NOTE(review): the name cosine_sim assumes get_pooled_embedding\n",
     "        # L2-normalizes its output — confirm in base_model.py.\n",
     "        cosine_sim = paddle.matmul(query_cls_embedding, title_cls_embedding, transpose_y=True)  \n",
     "        \n",
     "        # Subtract margin from the diagonal (the true query/title pairs).\n",
     "        margin_diag = paddle.full(shape=[query_cls_embedding.shape[0]], fill_value=self.margin, dtype=\"float32\") \n",
     "\n",
     "        cosine_sim = cosine_sim - paddle.diag(margin_diag)\n",
     "\n",
     "        cosine_sim = cosine_sim * self.scale\n",
     "\n",
     "        # Row i's correct \"class\" is column i, hence labels 0..batch_size-1.\n",
     "        labels = paddle.arange(0, query_cls_embedding.shape[0], dtype='int64') \n",
     "        labels = paddle.reshape(labels, shape=[-1, 1]) \n",
     "\n",
     "        # Softmax cross-entropy over each row treats retrieval as a\n",
     "        # batch-way classification problem.\n",
     "        loss = F.cross_entropy(input=cosine_sim, label=labels)\n",
     "\n",
     "        return loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-11T07:13:19.131267300Z",
     "start_time": "2025-02-11T07:13:19.110763Z"
    }
   },
   "outputs": [],
   "source": [
     "# Instantiate the dual-encoder with task-specific margin/scale;\n",
     "# output_emb_size=256 is the embedding size requested from the base model\n",
     "# (see base_model.py for how the projection is applied).\n",
     "model = SemanticIndexBatchNeg(pretrained_model, margin=0.1, scale=20, output_emb_size=256)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-11T07:13:19.131267300Z",
     "start_time": "2025-02-11T07:13:19.123158100Z"
    }
   },
   "outputs": [],
   "source": [
     "# Next: define the training hyper-parameters and run training."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-11T07:13:19.131267300Z",
     "start_time": "2025-02-11T07:13:19.123660300Z"
    }
   },
   "outputs": [],
   "source": [
     "epochs=3 \n",
     "# Total optimizer steps = batches per epoch * number of epochs.\n",
     "num_training_steps = len(train_data_loader) * epochs "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-11T07:13:19.131267300Z",
     "start_time": "2025-02-11T07:13:19.127120200Z"
    }
   },
   "outputs": [],
   "source": [
     "# Linear warmup (proportion 0.0 here, i.e. none) followed by linear decay\n",
     "# from the 5e-5 peak learning rate down to 0 over num_training_steps.\n",
     "lr_scheduler = LinearDecayWithWarmup(5E-5, num_training_steps, 0.0) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-11T07:13:19.138901400Z",
     "start_time": "2025-02-11T07:13:19.131267300Z"
    }
   },
   "outputs": [],
   "source": [
     "# Names of parameters that should receive weight decay: everything except\n",
     "# biases and LayerNorm weights (standard transformer fine-tuning practice).\n",
     "# NOTE(review): the optimizer below uses weight_decay=0.0, so this list\n",
     "# currently has no effect — confirm whether a non-zero decay was intended.\n",
     "decay_params = [\n",
     "        p.name for n, p in model.named_parameters() \n",
     "        if not any(nd in n for nd in [\"bias\", \"norm\"]) \n",
     "    ] "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-11T07:13:19.138901400Z",
     "start_time": "2025-02-11T07:13:19.138398600Z"
    }
   },
   "outputs": [],
   "source": [
     "# AdamW driven by the LR scheduler; apply_decay_param_fun restricts weight\n",
     "# decay to the parameter names collected in decay_params.\n",
     "optimizer = paddle.optimizer.AdamW( \n",
     "    learning_rate=lr_scheduler, \n",
     "    parameters=model.parameters(), \n",
     "    weight_decay=0.0, \n",
     "    apply_decay_param_fun=lambda x: x in decay_params)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "is_executing": true,
    "ExecuteTime": {
     "start_time": "2025-02-11T07:13:19.138901400Z"
    }
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\17362\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\paddlenlp\\transformers\\tokenizer_utils_base.py:2293: FutureWarning: The `max_seq_len` argument is deprecated and will be removed in a future version, please use `max_length` instead.\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "global step 10, epoch: 1, batch: 10, loss: 2.04695, speed: 0.02 step/s\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001B[32m[2025-02-11 15:24:06,727] [    INFO]\u001B[0m - tokenizer config file saved in model_param\\model_10\\tokenizer_config.json\u001B[0m\n",
      "\u001B[32m[2025-02-11 15:24:06,744] [    INFO]\u001B[0m - Special tokens file saved in model_param\\model_10\\special_tokens_map.json\u001B[0m\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "global step 20, epoch: 1, batch: 20, loss: 1.35102, speed: 0.02 step/s\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001B[32m[2025-02-11 15:32:17,881] [    INFO]\u001B[0m - tokenizer config file saved in model_param\\model_20\\tokenizer_config.json\u001B[0m\n",
      "\u001B[32m[2025-02-11 15:32:17,881] [    INFO]\u001B[0m - Special tokens file saved in model_param\\model_20\\special_tokens_map.json\u001B[0m\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "global step 30, epoch: 1, batch: 30, loss: 1.00404, speed: 0.02 step/s\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001B[32m[2025-02-11 15:42:53,486] [    INFO]\u001B[0m - tokenizer config file saved in model_param\\model_30\\tokenizer_config.json\u001B[0m\n",
      "\u001B[32m[2025-02-11 15:42:53,583] [    INFO]\u001B[0m - Special tokens file saved in model_param\\model_30\\special_tokens_map.json\u001B[0m\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "global step 40, epoch: 1, batch: 40, loss: 1.15711, speed: 0.01 step/s\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001B[32m[2025-02-11 15:54:37,053] [    INFO]\u001B[0m - tokenizer config file saved in model_param\\model_40\\tokenizer_config.json\u001B[0m\n",
      "\u001B[32m[2025-02-11 15:54:37,062] [    INFO]\u001B[0m - Special tokens file saved in model_param\\model_40\\special_tokens_map.json\u001B[0m\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "global step 50, epoch: 1, batch: 50, loss: 0.77514, speed: 0.02 step/s\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001B[32m[2025-02-11 16:03:47,138] [    INFO]\u001B[0m - tokenizer config file saved in model_param\\model_50\\tokenizer_config.json\u001B[0m\n",
      "\u001B[32m[2025-02-11 16:03:47,138] [    INFO]\u001B[0m - Special tokens file saved in model_param\\model_50\\special_tokens_map.json\u001B[0m\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "global step 60, epoch: 1, batch: 60, loss: 0.81591, speed: 0.02 step/s\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001B[32m[2025-02-11 16:11:32,131] [    INFO]\u001B[0m - tokenizer config file saved in model_param\\model_60\\tokenizer_config.json\u001B[0m\n",
      "\u001B[32m[2025-02-11 16:11:32,131] [    INFO]\u001B[0m - Special tokens file saved in model_param\\model_60\\special_tokens_map.json\u001B[0m\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "global step 70, epoch: 2, batch: 7, loss: 0.51715, speed: 0.02 step/s\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001B[32m[2025-02-11 16:22:33,497] [    INFO]\u001B[0m - tokenizer config file saved in model_param\\model_70\\tokenizer_config.json\u001B[0m\n",
      "\u001B[32m[2025-02-11 16:22:33,500] [    INFO]\u001B[0m - Special tokens file saved in model_param\\model_70\\special_tokens_map.json\u001B[0m\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "global step 80, epoch: 2, batch: 17, loss: 0.73915, speed: 0.02 step/s\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001B[32m[2025-02-11 16:31:09,460] [    INFO]\u001B[0m - tokenizer config file saved in model_param\\model_80\\tokenizer_config.json\u001B[0m\n",
      "\u001B[32m[2025-02-11 16:31:09,460] [    INFO]\u001B[0m - Special tokens file saved in model_param\\model_80\\special_tokens_map.json\u001B[0m\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "global step 90, epoch: 2, batch: 27, loss: 0.72627, speed: 0.01 step/s\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001B[32m[2025-02-11 16:44:14,944] [    INFO]\u001B[0m - tokenizer config file saved in model_param\\model_90\\tokenizer_config.json\u001B[0m\n",
      "\u001B[32m[2025-02-11 16:44:14,946] [    INFO]\u001B[0m - Special tokens file saved in model_param\\model_90\\special_tokens_map.json\u001B[0m\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "global step 100, epoch: 2, batch: 37, loss: 0.50815, speed: 0.02 step/s\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001B[32m[2025-02-11 16:54:31,058] [    INFO]\u001B[0m - tokenizer config file saved in model_param\\model_100\\tokenizer_config.json\u001B[0m\n",
      "\u001B[32m[2025-02-11 16:54:31,062] [    INFO]\u001B[0m - Special tokens file saved in model_param\\model_100\\special_tokens_map.json\u001B[0m\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "global step 110, epoch: 2, batch: 47, loss: 0.41893, speed: 0.01 step/s\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001B[32m[2025-02-11 17:12:57,771] [    INFO]\u001B[0m - tokenizer config file saved in model_param\\model_110\\tokenizer_config.json\u001B[0m\n",
      "\u001B[32m[2025-02-11 17:12:57,779] [    INFO]\u001B[0m - Special tokens file saved in model_param\\model_110\\special_tokens_map.json\u001B[0m\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "global step 120, epoch: 2, batch: 57, loss: 0.48688, speed: 0.01 step/s\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001B[32m[2025-02-11 17:32:00,131] [    INFO]\u001B[0m - tokenizer config file saved in model_param\\model_120\\tokenizer_config.json\u001B[0m\n",
      "\u001B[32m[2025-02-11 17:32:00,135] [    INFO]\u001B[0m - Special tokens file saved in model_param\\model_120\\special_tokens_map.json\u001B[0m\n"
     ]
    }
   ],
   "source": [
     "# Training loop: iterate over epochs and batches, compute the in-batch\n",
     "# negative loss, update parameters, and checkpoint every 10 global steps.\n",
     "save_dir='model_param'\n",
     "if not os.path.exists(save_dir):\n",
     "    os.makedirs(save_dir)\n",
     "\n",
     "global_step = 0 \n",
     "tic_train = time.time()\n",
     "\n",
     "for epoch in range(1, epochs + 1): \n",
     "    for step, batch in enumerate(train_data_loader, start=1): \n",
     "        # batchify_fn yields [query_ids, query_types, title_ids, title_types].\n",
     "        query_input_ids, query_token_type_ids, title_input_ids, title_token_type_ids = batch \n",
     "\n",
     "        # Forward pass returns the scalar in-batch-negative loss.\n",
     "        loss = model(query_input_ids=query_input_ids, title_input_ids=title_input_ids, query_token_type_ids=query_token_type_ids, title_token_type_ids=title_token_type_ids)\n",
     "\n",
     "        global_step += 1 \n",
     "        if global_step % 10 == 0: \n",
     "            # speed is averaged over the 10 steps since the last report\n",
     "            # (checkpoint-saving time is included in the next interval).\n",
     "            print(\"global step %d, epoch: %d, batch: %d, loss: %.5f, speed: %.2f step/s\"\n",
     "                % (global_step, epoch, step, loss, 10 / (time.time() - tic_train))) \n",
     "            tic_train = time.time() \n",
     "\n",
     "        # Standard update: backprop, optimizer step, LR schedule, clear grads.\n",
     "        loss.backward() \n",
     "        optimizer.step() \n",
     "        lr_scheduler.step() \n",
     "        optimizer.clear_grad() \n",
     "\n",
     "        # Checkpoint model weights and tokenizer every 10 global steps.\n",
     "        if global_step % 10 == 0: \n",
     "            save_path = os.path.join(save_dir, \"model_%d\" % global_step)\n",
     "            if not os.path.exists(save_path):\n",
     "                os.makedirs(save_path)\n",
     "            save_param_path = os.path.join(save_path, 'model_state.pdparams') \n",
     "            paddle.save(model.state_dict(), save_param_path) \n",
     "            tokenizer.save_pretrained(save_path) "
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "search",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
