{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "9aa5cc23-a96f-406f-9b9c-c8519eb12f16",
   "metadata": {},
   "source": [
    "# 全局参数定义"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "3891e357-d354-4ca1-a46c-05fb8fcfbc12",
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import pipeline\n",
    "\n",
    "# Paths: original HF checkpoint and output directory for the AWQ model\n",
    "org_model_path = \"facebook/opt-1.3b\"\n",
    "quant_model_path = \"models/facebook-opt-1.3b-awq\"\n",
    "\n",
    "# AWQ settings: 4-bit weights, group size 128, zero-point enabled, GEMM kernels\n",
    "quant_config = {\n",
    "    \"zero_point\": True,\n",
    "    \"q_group_size\": 128,\n",
    "    \"w_bit\": 4,\n",
    "    \"version\": \"GEMM\"\n",
    "}"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "024f64cb-335e-43b2-9f3c-c4176d73f9b8",
   "metadata": {},
   "source": [
    "# 量化前模型测试文本，显存只有16G，改用小模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "b710fc55-e35a-4edf-b502-a500c53e0f22",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/root/miniconda3/envs/lm_ai_learn/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.\n",
      "  warnings.warn(\n"
     ]
    }
   ],
   "source": [
    "# Load the original FP16 OPT-1.3b with the text-generation pipeline on GPU 0.\n",
    "# do_sample=True with num_return_sequences=3 yields three sampled continuations\n",
    "# per prompt (non-deterministic: no seed is set).\n",
    "generator = pipeline('text-generation',\n",
    "                     model=org_model_path,\n",
    "                     device=0,\n",
    "                     do_sample=True,\n",
    "                     num_return_sequences=3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "fc93d8df-5a09-448c-8dbc-c6a4fa628643",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[{'generated_text': 'The woman worked as a nurse for 33 years before her death\\nCalls were launched after the body'},\n",
       " {'generated_text': 'The woman worked as a nurse at a private hospital in Vadodara and was working on contract'},\n",
       " {'generated_text': 'The woman worked as a teacher at another school and had gone to a different school. She met up'}]"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Baseline (pre-quantization) sample for comparison with the AWQ model below\n",
    "generator(\"The woman worked as a\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "9d5c0e7e-c190-4856-a094-e8f3458015cc",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[{'generated_text': 'The man worked as a professor in the history department at the university in Munich but died last April of'},\n",
       " {'generated_text': 'The man worked as a plumber and knew how to repair it himself. He said the owner should'},\n",
       " {'generated_text': 'The man worked as a carpenter and ran a dry cleaning shop. He was also involved in street'}]"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Second baseline prompt from the FP16 model\n",
    "generator(\"The man worked as a\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3f99580c-453b-45aa-bb41-073e9a881737",
   "metadata": {},
   "source": [
    "# 使用AWQ量化模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "0984044b-fac4-4f6b-84cb-6a72620b70e8",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/root/miniconda3/envs/lm_ai_learn/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "06e1d0fad9214e3c80e05bb079bf97c4",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Fetching 9 files:   0%|          | 0/9 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "from awq import AutoAWQForCausalLM\n",
    "\n",
    "from transformers import AutoTokenizer\n",
    "\n",
    "# Load the FP16 model onto the GPU for AWQ calibration/quantization\n",
    "model = AutoAWQForCausalLM.from_pretrained(org_model_path, device_map=\"cuda\")\n",
    "tokenizer = AutoTokenizer.from_pretrained(org_model_path, trust_remote_code=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "17a0a3a5-f1d5-46ff-8339-659379e18227",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Repo card metadata block was not found. Setting CardData to empty.\n",
      "AWQ: 100%|██████████| 24/24 [09:14<00:00, 23.10s/it]\n"
     ]
    }
   ],
   "source": [
    "# Run AWQ quantization (downloads a default calibration dataset internally).\n",
    "# Expensive: the recorded run took ~9 minutes for 24 layers on this GPU.\n",
    "model.quantize(tokenizer, quant_config=quant_config)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "50449de5-e9f1-4cba-864b-90b8153aac93",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'zero_point': True, 'q_group_size': 128, 'w_bit': 4, 'version': 'GEMM'}"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Display the quantization settings that were applied\n",
    "quant_config"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9d5b9067-defb-4cfa-beb3-8ab156a460a5",
   "metadata": {},
   "source": [
    "# Transformers兼容性配置"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "37b4d411-d462-4e03-9a15-6f950137b5e2",
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import AwqConfig, AutoConfig\n",
    "\n",
    "# Rewrite the quantization config so the saved checkpoint is loadable via the\n",
    "# plain transformers API (AutoModelForCausalLM) rather than only via autoawq.\n",
    "# Note: transformers expects the version string lower-cased (\"gemm\").\n",
    "quantization_config = AwqConfig(\n",
    "    bits=quant_config[\"w_bit\"],\n",
    "    group_size=quant_config[\"q_group_size\"],\n",
    "    zero_point=quant_config[\"zero_point\"],\n",
    "    version=quant_config[\"version\"].lower(),\n",
    ").to_dict()\n",
    "\n",
    "# The underlying transformers model lives in model.model; its config must\n",
    "# carry the quantization settings as a plain dict so they serialize correctly.\n",
    "model.model.config.quantization_config = quantization_config"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "1d900422-4bd1-4ca2-b8e0-c238d20239e7",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "('models/facebook-opt-1.3b-awq/tokenizer_config.json',\n",
       " 'models/facebook-opt-1.3b-awq/special_tokens_map.json',\n",
       " 'models/facebook-opt-1.3b-awq/vocab.json',\n",
       " 'models/facebook-opt-1.3b-awq/merges.txt',\n",
       " 'models/facebook-opt-1.3b-awq/added_tokens.json',\n",
       " 'models/facebook-opt-1.3b-awq/tokenizer.json')"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Persist the quantized weights and the tokenizer to quant_model_path\n",
    "model.save_quantized(quant_model_path)\n",
    "tokenizer.save_pretrained(quant_model_path)  # tokenizer files saved alongside"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "76b1f8df-c5bf-472d-afaf-2d214bd2ad50",
   "metadata": {},
   "source": [
    "# 使用GPU加载量化模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "fdb6b2a0-a8e3-4135-a47f-3d4cba3cf056",
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import AutoTokenizer, AutoModelForCausalLM\n",
    "\n",
    "# Reload the AWQ checkpoint through the plain transformers API.\n",
    "# device_map=\"cuda\" already places the weights on the GPU; a chained .to(0)\n",
    "# is redundant and raises an error on accelerate-dispatched/quantized models,\n",
    "# so it is removed here.\n",
    "tokenizer = AutoTokenizer.from_pretrained(quant_model_path)\n",
    "model = AutoModelForCausalLM.from_pretrained(quant_model_path, device_map=\"cuda\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "347e04fd-79b9-4f2d-9b76-c65ee1241bd9",
   "metadata": {},
   "outputs": [],
   "source": [
    "def generate_text(text):\n",
    "    \"\"\"Greedily generate up to 64 new tokens from `text` with the quantized model.\n",
    "\n",
    "    Returns the full decoded string (prompt + continuation), special tokens stripped.\n",
    "    \"\"\"\n",
    "    # Use model.device instead of a hardcoded index so this works wherever\n",
    "    # device_map placed the model (GPU 0 today, but not guaranteed).\n",
    "    inputs = tokenizer(text, return_tensors=\"pt\").to(model.device)\n",
    "\n",
    "    out = model.generate(**inputs, max_new_tokens=64)\n",
    "    return tokenizer.decode(out[0], skip_special_tokens=True)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "fb10df23-c9d5-41ab-9083-6728f8abdb55",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Merry Christmas! I'm glad to the top the top the\n",
      "I think  I'm  I'm   I'm  I'm   I'm  I'm  I'm  I'm  IAm  I am  I am I am i am   i am.\n",
      "I am\n",
      "\n",
      "I am\n",
      "I am\n",
      "I\n"
     ]
    }
   ],
   "source": [
    "# Sample from the quantized model. NOTE: the recorded output shows clear\n",
    "# quality degradation (repetition loops) versus the FP16 baseline above.\n",
    "result = generate_text(\"Merry Christmas! I'm glad to\")\n",
    "print(result)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "645d01fc-362b-49cf-a92d-41924104a9f8",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The woman worked as a a a a a a a\n",
      "I think as a a a a a as as a\n",
      "I am as a a a as a a as as a a\n",
      "We are as a a a a as as a as\n",
      "We are as a as a as as a as\n",
      "We are as an as a as a\n"
     ]
    }
   ],
   "source": [
    "# Same prompt as the FP16 baseline for a direct comparison; the recorded\n",
    "# output again degenerates into token repetition after quantization.\n",
    "result = generate_text(\"The woman worked as a\")\n",
    "print(result)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
