{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "ef355cd2-80a9-489c-b423-af7bef3c432a",
   "metadata": {},
   "source": [
    "# 通义模型加载"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "7cee054d-5a2d-426f-b4de-4f46eb132fec",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2024-09-21T10:22:32.814277Z",
     "iopub.status.busy": "2024-09-21T10:22:32.814106Z",
     "iopub.status.idle": "2024-09-21T10:22:36.146123Z",
     "shell.execute_reply": "2024-09-21T10:22:36.145540Z",
     "shell.execute_reply.started": "2024-09-21T10:22:32.814256Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "from modelscope import AutoModelForCausalLM,AutoTokenizer\n",
    "import torch\n",
    "\n",
    "device='cuda' if torch.cuda.is_available() else 'cpu'\n",
    "\n",
    "def create_qwen_model():\n",
    "    \"\"\"Load the Qwen2-0.5B-Instruct causal LM and its tokenizer from ModelScope.\n",
    "\n",
    "    Returns:\n",
    "        (model, tokenizer): model placement is handled by device_map='auto'\n",
    "        and the checkpoint's native dtype is used (torch_dtype='auto').\n",
    "    \"\"\"\n",
    "    model = AutoModelForCausalLM.from_pretrained(\n",
    "        \"qwen/Qwen2-0.5B-Instruct\",\n",
    "        torch_dtype=\"auto\",\n",
    "        device_map=\"auto\"\n",
    "    )\n",
    "    tokenizer = AutoTokenizer.from_pretrained(\"qwen/Qwen2-0.5B-Instruct\")\n",
    "    return model,tokenizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "e12a94c0-fdd0-41af-8078-091d5d356846",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2024-09-21T10:22:36.148188Z",
     "iopub.status.busy": "2024-09-21T10:22:36.147567Z",
     "iopub.status.idle": "2024-09-21T10:22:40.976964Z",
     "shell.execute_reply": "2024-09-21T10:22:40.976367Z",
     "shell.execute_reply.started": "2024-09-21T10:22:36.148166Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "# Policy model to be trained with DPO\n",
    "model_pi,tokenizer=create_qwen_model()\n",
    "# Frozen reference model used by the DPO loss\n",
    "model_ref,_=create_qwen_model()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "df98393b-9a1b-4833-8c4f-8cb2ab2cec18",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-09-21T10:22:40.978217Z",
     "iopub.status.busy": "2024-09-21T10:22:40.978007Z",
     "iopub.status.idle": "2024-09-21T10:22:40.982536Z",
     "shell.execute_reply": "2024-09-21T10:22:40.982026Z",
     "shell.execute_reply.started": "2024-09-21T10:22:40.978197Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "# Quick interactive test helper\n",
    "def chat(prompt,tokenizer,model):\n",
    "    \"\"\"Generate a single assistant reply for `prompt` via the chat template.\n",
    "\n",
    "    Returns the decoded response text with special tokens stripped.\n",
    "    \"\"\"\n",
    "    messages = [\n",
    "        {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n",
    "        {\"role\": \"user\", \"content\": prompt},\n",
    "    ]\n",
    "    text = tokenizer.apply_chat_template(\n",
    "        messages,\n",
    "        tokenize=False,\n",
    "        add_generation_prompt=True\n",
    "    )\n",
    "\n",
    "    model_inputs = tokenizer([text], return_tensors=\"pt\").to(device)\n",
    "\n",
    "    # Pass the attention mask explicitly: Qwen2's pad token equals its eos\n",
    "    # token, so generate() cannot infer the mask and emits a warning otherwise.\n",
    "    generated_ids = model.generate(\n",
    "        model_inputs.input_ids,\n",
    "        attention_mask=model_inputs.attention_mask,\n",
    "        max_new_tokens=512\n",
    "    )\n",
    "    # Keep only the newly generated tokens (strip the echoed prompt)\n",
    "    generated_ids = [\n",
    "        output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)\n",
    "    ]\n",
    "\n",
    "    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]\n",
    "    return response"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f1f16dc6-3233-4db4-a8dc-a211a8eb76ca",
   "metadata": {},
   "source": [
    "# 训练数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "7850992b-7084-4746-a273-2571164760d9",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2024-09-21T10:22:40.983658Z",
     "iopub.status.busy": "2024-09-21T10:22:40.983272Z",
     "iopub.status.idle": "2024-09-21T10:22:40.988144Z",
     "shell.execute_reply": "2024-09-21T10:22:40.987673Z",
     "shell.execute_reply.started": "2024-09-21T10:22:40.983639Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "dpo_train_data=[\n",
    "    {'prompt':'你是谁?','chosen':'通义千问','reject':'我是阿里云开发的超大规模语言模型，我叫通义千问。'},\n",
    "    {'prompt':'你是谁发明的?','chosen':'小鱼儿','reject':'阿里巴巴'},\n",
    "]\n",
    "\n",
    "# Preference dataset -> chat-format model inputs\n",
    "def dpo_to_messages(dpo_pairs):\n",
    "    \"\"\"Expand each {prompt, chosen, reject} pair into two chat message lists:\n",
    "    one ending in the preferred answer, one in the rejected answer.\"\"\"\n",
    "    chosen_messages=[]\n",
    "    reject_messages=[]\n",
    "    for pair in dpo_pairs:\n",
    "        chosen_messages.append([\n",
    "                {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n",
    "                {\"role\": \"user\", \"content\": pair['prompt']},\n",
    "                {\"role\": \"assistant\", \"content\": pair['chosen']},\n",
    "            ]\n",
    "        )\n",
    "        reject_messages.append([\n",
    "                {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n",
    "                {\"role\": \"user\", \"content\": pair['prompt']},\n",
    "                {\"role\": \"assistant\", \"content\": pair['reject']},\n",
    "            ]\n",
    "        )\n",
    "    return chosen_messages,reject_messages"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "e88a49d8-e64a-469a-9d9f-97414107ce55",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-09-21T10:22:40.988888Z",
     "iopub.status.busy": "2024-09-21T10:22:40.988714Z",
     "iopub.status.idle": "2024-09-21T10:22:40.995433Z",
     "shell.execute_reply": "2024-09-21T10:22:40.994964Z",
     "shell.execute_reply.started": "2024-09-21T10:22:40.988871Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "# Training-data preprocessing\n",
    "def preprocess(tokenizer,batch_messages):\n",
    "    \"\"\"Build ChatML-formatted token sequences for training.\n",
    "\n",
    "    For system/user turns the role, newline and content are masked out of\n",
    "    the targets with -100; for assistant turns only the role header is\n",
    "    masked, so the loss applies to the assistant's reply tokens. Sequences\n",
    "    are right-padded with <|endoftext|> to the longest sample in the batch.\n",
    "\n",
    "    Returns (batch_input_ids, batch_target_ids, batch_mask) as long tensors.\n",
    "    \"\"\"\n",
    "    input_list=[]\n",
    "    target_list=[]\n",
    "    \n",
    "    im_start=tokenizer('<|im_start|>').input_ids\n",
    "    im_end=tokenizer('<|im_end|>').input_ids\n",
    "    newline=tokenizer('\\n').input_ids\n",
    "    pad=tokenizer('<|endoftext|>').input_ids\n",
    "    ignore=[-100]\n",
    "    \n",
    "    for group in batch_messages:\n",
    "        input_ids=[]\n",
    "        target_ids=[]\n",
    "        for msg in group:\n",
    "            role=tokenizer(msg['role']).input_ids\n",
    "            content=tokenizer(msg['content']).input_ids\n",
    "            if msg['role'] in ['system','user']:\n",
    "                # Prompt-side turn: mask role, newline and content in targets\n",
    "                ignore_parts=role+newline+content\n",
    "                input_ids+=im_start+ignore_parts+im_end+newline\n",
    "                target_ids+=im_start+ignore*len(ignore_parts)+im_end+newline\n",
    "            else:\n",
    "                # Assistant turn: mask only the role header; train on content\n",
    "                ignore_parts=role+newline\n",
    "                input_ids+=im_start+ignore_parts+content+im_end+newline\n",
    "                target_ids+=im_start+ignore*len(ignore_parts)+content+im_end+newline\n",
    "        input_list.append(input_ids)\n",
    "        target_list.append(target_ids)\n",
    "    \n",
    "    # Right-pad every sample to the batch max length\n",
    "    max_len=max([len(ids) for ids in input_list])\n",
    "    for input_ids,target_ids in zip(input_list,target_list):\n",
    "        input_ids+=pad*(max_len-len(input_ids))\n",
    "        target_ids+=ignore*(max_len-len(target_ids))\n",
    "    batch_input_ids=torch.tensor(input_list,dtype=torch.long)\n",
    "    batch_target_ids=torch.tensor(target_list,dtype=torch.long)\n",
    "    # Attention mask: 1 for real tokens, 0 for pad positions\n",
    "    batch_mask=batch_input_ids.ne(pad[0]).type(torch.long)\n",
    "    return batch_input_ids,batch_target_ids,batch_mask"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9aa2c1d5-dc25-4fbf-8ae5-fcb2f565a7dd",
   "metadata": {},
   "source": [
    "# DPO训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "c5222f54-0e3a-49d4-9e28-921ca6ce6f38",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2024-09-21T10:22:40.996197Z",
     "iopub.status.busy": "2024-09-21T10:22:40.996024Z",
     "iopub.status.idle": "2024-09-21T10:22:41.005502Z",
     "shell.execute_reply": "2024-09-21T10:22:41.005058Z",
     "shell.execute_reply.started": "2024-09-21T10:22:40.996180Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Qwen2ForCausalLM(\n",
       "  (model): Qwen2Model(\n",
       "    (embed_tokens): Embedding(151936, 896)\n",
       "    (layers): ModuleList(\n",
       "      (0-23): 24 x Qwen2DecoderLayer(\n",
       "        (self_attn): Qwen2SdpaAttention(\n",
       "          (q_proj): Linear(in_features=896, out_features=896, bias=True)\n",
       "          (k_proj): Linear(in_features=896, out_features=128, bias=True)\n",
       "          (v_proj): Linear(in_features=896, out_features=128, bias=True)\n",
       "          (o_proj): Linear(in_features=896, out_features=896, bias=False)\n",
       "          (rotary_emb): Qwen2RotaryEmbedding()\n",
       "        )\n",
       "        (mlp): Qwen2MLP(\n",
       "          (gate_proj): Linear(in_features=896, out_features=4864, bias=False)\n",
       "          (up_proj): Linear(in_features=896, out_features=4864, bias=False)\n",
       "          (down_proj): Linear(in_features=4864, out_features=896, bias=False)\n",
       "          (act_fn): SiLU()\n",
       "        )\n",
       "        (input_layernorm): Qwen2RMSNorm((896,), eps=1e-06)\n",
       "        (post_attention_layernorm): Qwen2RMSNorm((896,), eps=1e-06)\n",
       "      )\n",
       "    )\n",
       "    (norm): Qwen2RMSNorm((896,), eps=1e-06)\n",
       "  )\n",
       "  (lm_head): Linear(in_features=896, out_features=151936, bias=False)\n",
       ")"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model_pi.train()\n",
    "# The reference model is frozen in DPO: keep it in eval mode and\n",
    "# disable gradients so its forward passes build no autograd graph.\n",
    "model_ref.eval()\n",
    "for p in model_ref.parameters():\n",
    "    p.requires_grad_(False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "f5ee1a21-5b4e-4342-a8ee-1385b3f4e392",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2024-09-21T10:22:41.007418Z",
     "iopub.status.busy": "2024-09-21T10:22:41.007092Z",
     "iopub.status.idle": "2024-09-21T10:22:41.011167Z",
     "shell.execute_reply": "2024-09-21T10:22:41.010638Z",
     "shell.execute_reply.started": "2024-09-21T10:22:41.007400Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "# Optimizer updates only the policy model's parameters\n",
    "optimizer=torch.optim.SGD(model_pi.parameters(),lr=1e-3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "87dac537-5bb4-425c-a240-ac685c83291d",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2024-09-21T10:22:41.012122Z",
     "iopub.status.busy": "2024-09-21T10:22:41.011943Z",
     "iopub.status.idle": "2024-09-21T10:22:41.018626Z",
     "shell.execute_reply": "2024-09-21T10:22:41.018112Z",
     "shell.execute_reply.started": "2024-09-21T10:22:41.012105Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "# DPO loss helpers\n",
    "def dpo_prob_calc(target_ids,pi_logits,ref_logits):\n",
    "    \"\"\"Per-sample mean log-probability of the target tokens under both models.\n",
    "\n",
    "    Positions labelled -100 are excluded from the average. Returns\n",
    "    (pi_logp, ref_logp), each of shape (batch,).\n",
    "    \"\"\"\n",
    "    pi_probs=torch.log_softmax(pi_logits,dim=-1)      # log-probabilities over the vocab\n",
    "    ref_probs=torch.log_softmax(ref_logits,dim=-1)\n",
    "    \n",
    "    ignore_mask=target_ids!=-100 # positions that count toward the loss\n",
    "    indexes=target_ids*ignore_mask # map -100 -> 0 so gather gets valid indices\n",
    "    \n",
    "    pi_probs_of_target=torch.gather(pi_probs,dim=-1,index=indexes.unsqueeze(-1)).squeeze(-1) * ignore_mask # log-prob of each target token; ignored positions zeroed\n",
    "    ref_probs_of_target=torch.gather(ref_probs,dim=-1,index=indexes.unsqueeze(-1)).squeeze(-1) * ignore_mask    \n",
    "    \n",
    "    pi_final_prob=pi_probs_of_target.sum(-1)/ignore_mask.sum(-1)     # mean token log-prob per sample\n",
    "    ref_final_prob=ref_probs_of_target.sum(-1)/ignore_mask.sum(-1)\n",
    "    return pi_final_prob,ref_final_prob\n",
    "    \n",
    "# DPO loss function, cf. https://github.com/huggingface/trl/blob/main/trl/trainer/dpo_trainer.py\n",
    "def dpo_loss(params):\n",
    "    \"\"\"Compute the DPO loss from policy/reference logits on chosen/rejected pairs.\n",
    "\n",
    "    Targets are shifted right and logits truncated by one step so logits at\n",
    "    position t predict the token at t+1 (standard causal-LM alignment).\n",
    "    \"\"\"\n",
    "    ## Chosen sequences under both models\n",
    "    chosen_target_ids=params['chosen_target_ids'][:,1:]\n",
    "    pi_chosen_logits=params['pi_chosen_logits'][:,:-1,:]\n",
    "    ref_chosen_logits=params['ref_chosen_logits'][:,:-1,:]\n",
    "    pi_chosen_prob,ref_chosen_prob=dpo_prob_calc(chosen_target_ids,pi_chosen_logits,ref_chosen_logits)\n",
    "    \n",
    "    ## Rejected sequences under both models\n",
    "    reject_target_ids=params['reject_target_ids'][:,1:]\n",
    "    pi_reject_logits=params['pi_reject_logits'][:,:-1,:]\n",
    "    ref_reject_logits=params['ref_reject_logits'][:,:-1,:]\n",
    "    pi_reject_prob,ref_reject_prob=dpo_prob_calc(reject_target_ids,pi_reject_logits,ref_reject_logits)\n",
    "    \n",
    "    # DPO objective: -log sigmoid(beta * (policy margin - reference margin))\n",
    "    pi_prob_diff=pi_chosen_prob-pi_reject_prob \n",
    "    ref_prob_diff=ref_chosen_prob-ref_reject_prob\n",
    "    beta=0.1\n",
    "    loss=-torch.nn.functional.logsigmoid(beta*(pi_prob_diff-ref_prob_diff))\n",
    "    return loss.mean()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "5de8e708-ce5f-4c80-82f6-989285a7f284",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2024-09-21T10:22:41.019511Z",
     "iopub.status.busy": "2024-09-21T10:22:41.019332Z",
     "iopub.status.idle": "2024-09-21T10:22:46.984565Z",
     "shell.execute_reply": "2024-09-21T10:22:46.983884Z",
     "shell.execute_reply.started": "2024-09-21T10:22:41.019493Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Starting from v4.46, the `logits` model output will have the same type as the model (except at train time, where it will always be FP32)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor(0.6931, device='cuda:0', grad_fn=<MeanBackward0>)\n",
      "tensor(0.6779, device='cuda:0', grad_fn=<MeanBackward0>)\n",
      "tensor(0.6652, device='cuda:0', grad_fn=<MeanBackward0>)\n",
      "tensor(0.6437, device='cuda:0', grad_fn=<MeanBackward0>)\n",
      "tensor(0.6188, device='cuda:0', grad_fn=<MeanBackward0>)\n",
      "tensor(0.6312, device='cuda:0', grad_fn=<MeanBackward0>)\n",
      "tensor(0.6198, device='cuda:0', grad_fn=<MeanBackward0>)\n",
      "tensor(0.6039, device='cuda:0', grad_fn=<MeanBackward0>)\n",
      "tensor(0.5879, device='cuda:0', grad_fn=<MeanBackward0>)\n",
      "tensor(0.5715, device='cuda:0', grad_fn=<MeanBackward0>)\n",
      "tensor(0.5521, device='cuda:0', grad_fn=<MeanBackward0>)\n",
      "tensor(0.5223, device='cuda:0', grad_fn=<MeanBackward0>)\n",
      "tensor(0.4829, device='cuda:0', grad_fn=<MeanBackward0>)\n",
      "tensor(0.4298, device='cuda:0', grad_fn=<MeanBackward0>)\n",
      "tensor(0.3922, device='cuda:0', grad_fn=<MeanBackward0>)\n",
      "tensor(0.3678, device='cuda:0', grad_fn=<MeanBackward0>)\n",
      "tensor(0.3481, device='cuda:0', grad_fn=<MeanBackward0>)\n",
      "tensor(0.3873, device='cuda:0', grad_fn=<MeanBackward0>)\n",
      "tensor(0.3114, device='cuda:0', grad_fn=<MeanBackward0>)\n",
      "tensor(0.2781, device='cuda:0', grad_fn=<MeanBackward0>)\n"
     ]
    }
   ],
   "source": [
    "iterators=20\n",
    "\n",
    "for i in range(iterators):\n",
    "    # Build one batch of preference data\n",
    "    chosen_messages,reject_messages=dpo_to_messages(dpo_train_data)\n",
    "    # Tokenize into inputs, shifted-label targets, and attention masks\n",
    "    chosen_input_ids,chosen_target_ids,chosen_mask=preprocess(tokenizer,chosen_messages)\n",
    "    reject_input_ids,reject_target_ids,reject_mask=preprocess(tokenizer,reject_messages)\n",
    "    # Policy model forward (gradients required)\n",
    "    pi_chosen_logits=model_pi(input_ids=chosen_input_ids.to(device),attention_mask=chosen_mask.to(device)).logits\n",
    "    pi_reject_logits=model_pi(input_ids=reject_input_ids.to(device),attention_mask=reject_mask.to(device)).logits\n",
    "    # Reference model forward: it is frozen, so skip autograd tracking\n",
    "    # entirely to save memory and compute\n",
    "    with torch.no_grad():\n",
    "        ref_chosen_logits=model_ref(input_ids=chosen_input_ids.to(device),attention_mask=chosen_mask.to(device)).logits\n",
    "        ref_reject_logits=model_ref(input_ids=reject_input_ids.to(device),attention_mask=reject_mask.to(device)).logits\n",
    "    # DPO loss (logits are already on `device`)\n",
    "    loss=dpo_loss({\n",
    "        'chosen_target_ids':chosen_target_ids.to(device),\n",
    "        'reject_target_ids':reject_target_ids.to(device),\n",
    "        'pi_chosen_logits':pi_chosen_logits,\n",
    "        'pi_reject_logits':pi_reject_logits,\n",
    "        'ref_chosen_logits':ref_chosen_logits,\n",
    "        'ref_reject_logits':ref_reject_logits,\n",
    "    })\n",
    "    print(loss.item())\n",
    "    optimizer.zero_grad()\n",
    "    loss.backward()\n",
    "    optimizer.step()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "36f398f7-5888-4e4a-8001-e3dfcaeacfcd",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2024-09-21T10:22:46.986032Z",
     "iopub.status.busy": "2024-09-21T10:22:46.985505Z",
     "iopub.status.idle": "2024-09-21T10:22:46.992713Z",
     "shell.execute_reply": "2024-09-21T10:22:46.992020Z",
     "shell.execute_reply.started": "2024-09-21T10:22:46.985998Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Qwen2ForCausalLM(\n",
       "  (model): Qwen2Model(\n",
       "    (embed_tokens): Embedding(151936, 896)\n",
       "    (layers): ModuleList(\n",
       "      (0-23): 24 x Qwen2DecoderLayer(\n",
       "        (self_attn): Qwen2SdpaAttention(\n",
       "          (q_proj): Linear(in_features=896, out_features=896, bias=True)\n",
       "          (k_proj): Linear(in_features=896, out_features=128, bias=True)\n",
       "          (v_proj): Linear(in_features=896, out_features=128, bias=True)\n",
       "          (o_proj): Linear(in_features=896, out_features=896, bias=False)\n",
       "          (rotary_emb): Qwen2RotaryEmbedding()\n",
       "        )\n",
       "        (mlp): Qwen2MLP(\n",
       "          (gate_proj): Linear(in_features=896, out_features=4864, bias=False)\n",
       "          (up_proj): Linear(in_features=896, out_features=4864, bias=False)\n",
       "          (down_proj): Linear(in_features=4864, out_features=896, bias=False)\n",
       "          (act_fn): SiLU()\n",
       "        )\n",
       "        (input_layernorm): Qwen2RMSNorm((896,), eps=1e-06)\n",
       "        (post_attention_layernorm): Qwen2RMSNorm((896,), eps=1e-06)\n",
       "      )\n",
       "    )\n",
       "    (norm): Qwen2RMSNorm((896,), eps=1e-06)\n",
       "  )\n",
       "  (lm_head): Linear(in_features=896, out_features=151936, bias=False)\n",
       ")"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Switch the tuned policy model to inference mode\n",
    "model_pi.eval()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "7061db38-7d78-4d11-803d-4dff4f98a19f",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2024-09-21T10:22:46.994071Z",
     "iopub.status.busy": "2024-09-21T10:22:46.993629Z",
     "iopub.status.idle": "2024-09-21T10:22:47.107658Z",
     "shell.execute_reply": "2024-09-21T10:22:47.107110Z",
     "shell.execute_reply.started": "2024-09-21T10:22:46.994042Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "The attention mask is not set and cannot be inferred from input because pad token is same as eos token. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'通义千问'"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# After DPO the model should give the preferred ('chosen') answer\n",
    "chat('你是谁?',tokenizer,model_pi)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "9f3c7feb-350d-4208-9ab5-75186339a031",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-09-21T10:22:47.108918Z",
     "iopub.status.busy": "2024-09-21T10:22:47.108449Z",
     "iopub.status.idle": "2024-09-21T10:22:47.193092Z",
     "shell.execute_reply": "2024-09-21T10:22:47.192562Z",
     "shell.execute_reply.started": "2024-09-21T10:22:47.108888Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'小鱼儿'"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Second preference pair: expect the 'chosen' answer here too\n",
    "chat('你是谁发明的?',tokenizer,model_pi)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "8e848068-aa49-4a0d-b116-ebe34ed8c9d2",
   "metadata": {
    "ExecutionIndicator": {
     "show": true
    },
    "execution": {
     "iopub.execute_input": "2024-09-21T10:22:47.194311Z",
     "iopub.status.busy": "2024-09-21T10:22:47.193879Z",
     "iopub.status.idle": "2024-09-21T10:22:52.427442Z",
     "shell.execute_reply": "2024-09-21T10:22:52.426764Z",
     "shell.execute_reply.started": "2024-09-21T10:22:47.194281Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'Transformer模型是近年来发展非常迅速的自然语言处理模型，它主要用于生成、预训练和推理。它的基本思想是在一个隐藏层中嵌入大量的上下文信息，使得模型能够理解句子的整体含义，并且可以自动生成与输入语句相关的高质量文本。\\n\\nTransformer模型由多个部分组成，包括两个主要的层——Transformer网络和一个编码器（Encoder）。其中，Transformer网络负责将输入的文本转换为一个表示完整句子的向量表示，而编码器则负责从这个向量表示中提取出有意义的信息，以便进行下一步的处理或预测。\\n\\n具体来说，Transformer模型通常包含以下几个关键组件：\\n\\n1. 输入端：用于接收输入的文本。\\n2. 隐藏层：用于存储和保存上一层的输出结果。\\n3. 多层：每个隐藏层都与之前的一个隐藏层相关联，以进一步增强模型的泛化能力。\\n4. 问题解决单元（Decoder）：用于在后续的操作中对这些信息进行解释和决策。\\n5. 输出端：用于将处理后的信息返回给用户。\\n\\nTransformer模型已经被广泛应用于各种任务，例如语言理解和生成、文本分类、机器翻译等。它的优点在于它可以有效地处理长文本数据，同时也能更好地捕捉复杂语境下的信息。'"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Out-of-distribution prompt: general capability should survive DPO tuning\n",
    "chat('讲讲transformer模型',tokenizer,model_pi)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.14"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
