{
 "cells": [
  {
   "cell_type": "code",
   "id": "6e886a5058a2d88a",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2025-07-21T14:02:01.519119Z",
     "start_time": "2025-07-21T14:01:59.566020Z"
    }
   },
   "source": [
     "# Import the required libraries\n",
     "import torch  # PyTorch, used for deep-learning model training and inference\n",
     "from tqdm import tqdm  # tqdm, used to display progress bars\n",
     "from modelscope import AutoTokenizer, AutoModel  # modelscope, used to auto-load the pretrained model and tokenizer\n",
     "from peft import PeftModel, PeftConfig  # peft, used to load the parameter-efficient (LoRA) fine-tuned adapter"
   ],
   "outputs": [],
   "execution_count": 1
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-21T14:02:05.788228Z",
     "start_time": "2025-07-21T14:02:05.775228Z"
    }
   },
   "cell_type": "code",
   "source": [
     "# Define the paths of the base model and the fine-tuned adapter\n",
     "model_dir = \"C:\\\\Users\\\\16014\\\\.cache\\\\modelscope\\\\hub\\\\models\\\\ZhipuAI\\\\chatglm3-6b\"  # path to the pretrained base model (NOTE: hardcoded absolute local path — not portable)\n",
     "peft_model_id = \"./lora_saver/lora_query_key_value\"  # path to the PEFT (LoRA) adapter\n",
     "#peft_model_id = \"E:\\\\projects\\\\chatglm3-project\\\\chatglm3-project\\\\ch10\\\\lora_saver\\\\lora_query_key_value.pth\""
   ],
   "id": "49855d9e6caa505f",
   "outputs": [],
   "execution_count": 2
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-21T14:02:18.777033Z",
     "start_time": "2025-07-21T14:02:06.989234Z"
    }
   },
   "cell_type": "code",
   "source": [
     "# Load and prepare the model without tracking gradients\n",
     "with torch.no_grad():\n",
     "    # Auto-load the tokenizer from the pretrained model path\n",
     "    tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)\n",
     "    # Auto-load the model, cast it to half precision (saves GPU memory), then move it to the GPU\n",
     "    model = AutoModel.from_pretrained(model_dir, trust_remote_code=True).half().cuda()"
   ],
   "id": "9f5643aad23b2bfd",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Loading checkpoint shards:   0%|          | 0/7 [00:00<?, ?it/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "faf1ace4fe064655ac514b9e1b0fbab3"
      }
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "execution_count": 3
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-21T14:02:18.870542Z",
     "start_time": "2025-07-21T14:02:18.779033Z"
    }
   },
   "cell_type": "code",
   "source": [
     "# Wrap the base model with the PEFT (LoRA) adapter weights\n",
     "model = PeftModel.from_pretrained(model, peft_model_id)\n",
     "# Switch the model to evaluation mode (disables dropout and similar train-time layers)\n",
     "model.eval()"
   ],
   "id": "f96b2d8c6bce133c",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "PeftModelForCausalLM(\n",
       "  (base_model): LoraModel(\n",
       "    (model): ChatGLMForConditionalGeneration(\n",
       "      (transformer): ChatGLMModel(\n",
       "        (embedding): Embedding(\n",
       "          (word_embeddings): Embedding(65024, 4096)\n",
       "        )\n",
       "        (rotary_pos_emb): RotaryEmbedding()\n",
       "        (encoder): GLMTransformer(\n",
       "          (layers): ModuleList(\n",
       "            (0-27): 28 x GLMBlock(\n",
       "              (input_layernorm): RMSNorm()\n",
       "              (self_attention): SelfAttention(\n",
       "                (query_key_value): lora.Linear(\n",
       "                  (base_layer): Linear(in_features=4096, out_features=4608, bias=True)\n",
       "                  (lora_dropout): ModuleDict(\n",
       "                    (default): Dropout(p=0.05, inplace=False)\n",
       "                  )\n",
       "                  (lora_A): ModuleDict(\n",
       "                    (default): Linear(in_features=4096, out_features=8, bias=False)\n",
       "                  )\n",
       "                  (lora_B): ModuleDict(\n",
       "                    (default): Linear(in_features=8, out_features=4608, bias=False)\n",
       "                  )\n",
       "                  (lora_embedding_A): ParameterDict()\n",
       "                  (lora_embedding_B): ParameterDict()\n",
       "                  (lora_magnitude_vector): ModuleDict()\n",
       "                )\n",
       "                (core_attention): CoreAttention(\n",
       "                  (attention_dropout): Dropout(p=0.0, inplace=False)\n",
       "                )\n",
       "                (dense): Linear(in_features=4096, out_features=4096, bias=False)\n",
       "              )\n",
       "              (post_attention_layernorm): RMSNorm()\n",
       "              (mlp): MLP(\n",
       "                (dense_h_to_4h): Linear(in_features=4096, out_features=27392, bias=False)\n",
       "                (dense_4h_to_h): Linear(in_features=13696, out_features=4096, bias=False)\n",
       "              )\n",
       "            )\n",
       "          )\n",
       "          (final_layernorm): RMSNorm()\n",
       "        )\n",
       "        (output_layer): Linear(in_features=4096, out_features=65024, bias=False)\n",
       "      )\n",
       "    )\n",
       "  )\n",
       ")"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 4
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-21T14:02:58.961303Z",
     "start_time": "2025-07-21T14:02:58.956304Z"
    }
   },
   "cell_type": "code",
   "source": [
     "# Define the conversation history and the query\n",
     "history = []  # conversation history, starts empty\n",
     "query = \"你是谁\"  # the query, i.e. the user's input question (\"Who are you?\")\n",
     "role = \"user\"  # role; \"user\" marks this turn as user input"
   ],
   "id": "374dc60b0873bdc1",
   "outputs": [],
   "execution_count": 6
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-21T14:02:59.719122Z",
     "start_time": "2025-07-21T14:02:59.709933Z"
    }
   },
   "cell_type": "code",
   "source": [
     "# Build the chat input with the tokenizer\n",
     "inputs = tokenizer.build_chat_input(query, history=history, role=role)\n",
     "# Move the input tensors to the GPU\n",
     "inputs = inputs.to('cuda')"
   ],
   "id": "3d0ae70ca87aea5a",
   "outputs": [],
   "execution_count": 7
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-21T14:03:00.418359Z",
     "start_time": "2025-07-21T14:03:00.401852Z"
    }
   },
   "cell_type": "code",
   "source": [
     "# Define the stop-token ids and the generation parameters\n",
     "eos_token_id = [tokenizer.eos_token_id, tokenizer.get_command(\"<|user|>\"),\n",
     "                tokenizer.get_command(\"<|observation|>\")]  # stop tokens: the normal EOS plus the special role tokens\n",
     "gen_kwargs = {\"max_length\": 1200, \"num_beams\": 1, \"do_sample\": True, \"top_p\": 0.95,\n",
     "              \"temperature\": 0.95}  # generation parameters: max length, beam width, sampling flag, top-p and temperature"
   ],
   "id": "6d3e3dc144904744",
   "outputs": [],
   "execution_count": 8
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-21T14:03:04.161396Z",
     "start_time": "2025-07-21T14:03:01.044735Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 生成输出\n",
    "outputs = model.generate(**inputs, **gen_kwargs, eos_token_id=eos_token_id)\n",
    "# 对输出进行处理，去掉输入的部分并转换为列表形式\n",
    "outputs = outputs.tolist()[0][len(inputs[\"input_ids\"][0]):-1]\n",
    "\n",
    "# 使用分词器解码输出\n",
    "response = tokenizer.decode(outputs)\n",
    "# 处理响应，包括去除一些特殊标记和更新对话历史\n",
    "response, history = model.process_response(response, history)\n",
    "\n",
    "# 打印响应\n",
    "print(response)"
   ],
   "id": "initial_id",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "我叫茂茂，现在是你的购物助理机器人。\n"
     ]
    }
   ],
   "execution_count": 9
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": "",
   "id": "7e5233eb028a1530"
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
