{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 模型训练好后推理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Inference with a LoRA-fine-tuned Qwen1.5 chat model: classify each line of a\n",
    "# JSONL test set and write one generated response per input line to a result file.\n",
    "from transformers import AutoModelForCausalLM, AutoTokenizer\n",
    "import torch\n",
    "from peft import PeftModel\n",
    "import json\n",
    "\n",
    "# Paths: base model weights and the trained LoRA adapter checkpoint\n",
    "mode_path = './qwen/Qwen1___5-7B-Chat/'\n",
    "lora_path = 'output/Qwen1.5/checkpoint-700'\n",
    "\n",
    "# Load tokenizer\n",
    "tokenizer = AutoTokenizer.from_pretrained(mode_path)\n",
    "\n",
    "# Load the base model (bf16; device placement handled by device_map=\"auto\")\n",
    "model = AutoModelForCausalLM.from_pretrained(mode_path, device_map=\"auto\", torch_dtype=torch.bfloat16)\n",
    "\n",
    "# Attach the LoRA adapter weights and switch to inference mode\n",
    "model = PeftModel.from_pretrained(model, model_id=lora_path)\n",
    "model.eval()\n",
    "\n",
    "jsonl_file = './test.jsonl'  # input: one JSON object per line with 'text' and 'category'\n",
    "result = './result.jsonl'    # output: one JSON-encoded response string per line\n",
    "\n",
    "# BUG FIX: the original code reused `messages` both as the results accumulator\n",
    "# and as the per-sample chat prompt, so each iteration overwrote the list and\n",
    "# only the final sample's prompt dicts + response survived to be written out.\n",
    "# A dedicated `results` list keeps one response per input line.\n",
    "results = []\n",
    "\n",
    "# Read the jsonl file line by line\n",
    "with open(jsonl_file, 'r', encoding='utf-8') as file:\n",
    "    for line in file:\n",
    "        # Parse one sample per line\n",
    "        data = json.loads(line)\n",
    "        context = data[\"text\"]\n",
    "        category = data[\"category\"]\n",
    "        prompt = f'文本:{context},类型选型:{category}'\n",
    "        messages = [\n",
    "            {\"role\": \"system\", \"content\": \"你是一个文本分类领域的专家，你会接收到一段文本和几个潜在的分类选项，请输出文本内容的正确类型\"},\n",
    "            {\"role\": \"user\", \"content\": prompt}\n",
    "        ]\n",
    "\n",
    "        text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n",
    "\n",
    "        # Send inputs to the model's device rather than hardcoding 'cuda'\n",
    "        model_inputs = tokenizer([text], return_tensors=\"pt\").to(model.device)\n",
    "\n",
    "        # No gradients needed at inference time\n",
    "        with torch.no_grad():\n",
    "            generated_ids = model.generate(\n",
    "                model_inputs.input_ids,\n",
    "                max_new_tokens=512\n",
    "            )\n",
    "        # Strip the prompt tokens so only the newly generated part remains\n",
    "        generated_ids = [\n",
    "            output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)\n",
    "        ]\n",
    "\n",
    "        response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]\n",
    "        print(response)\n",
    "        results.append(response)\n",
    "\n",
    "# Save one JSON-encoded response per line\n",
    "with open(result, 'w', encoding='utf-8') as file:\n",
    "    for message in results:\n",
    "        file.write(json.dumps(message, ensure_ascii=False) + '\\n')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "quen_env",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "name": "python",
   "version": "3.9.19"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
