{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "de53995b-32ed-4722-8cac-ba104c8efacb",
   "metadata": {},
   "source": [
    "# 导入环境"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "52fac949-4150-4091-b0c3-2968ab5e385c",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/root/.conda/envs/hello-x/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    }
   ],
   "source": [
    "from datasets import Dataset\n",
    "import pandas as pd\n",
    "from transformers import AutoTokenizer, AutoModelForCausalLM, DataCollatorForSeq2Seq, TrainingArguments, Trainer, GenerationConfig"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "e098d9eb",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "# 将JSON文件转换为CSV文件\n",
    "df = pd.read_json('../../data/huanhuan.json')\n",
    "ds = Dataset.from_pandas(df)"
   ]
  },
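  {
   "cell_type": "markdown",
   "id": "8a1c2e3f-4b5d-4678-9a0b-c1d2e3f4a5b6",
   "metadata": {},
   "source": [
    "A quick sanity check (illustrative addition): peek at the first record to confirm the dataset exposes the `instruction`, `input`, and `output` fields that `process_func` below relies on."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9b2d3f4a-5c6e-4789-8b1c-d2e3f4a5b6c7",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Inspect the first raw example; process_func expects these three keys.\n",
    "ds[0]"
   ]
  },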
  {
   "cell_type": "markdown",
   "id": "51d05e5d-d14e-4f03-92be-9a9677d41918",
   "metadata": {},
   "source": [
    "# 处理数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "74ee5a67-2e55-4974-b90e-cbf492de500a",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "LlamaTokenizerFast(name_or_path='../../model/deepseek-ai/deepseek-llm-7b-chat/', vocab_size=100000, model_max_length=4096, is_fast=True, padding_side='right', truncation_side='right', special_tokens={'bos_token': '<｜begin▁of▁sentence｜>', 'eos_token': '<｜end▁of▁sentence｜>', 'pad_token': '<｜end▁of▁sentence｜>'}, clean_up_tokenization_spaces=False),  added_tokens_decoder={\n",
       "\t100000: AddedToken(\"<｜begin▁of▁sentence｜>\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=True),\n",
       "\t100001: AddedToken(\"<｜end▁of▁sentence｜>\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=True),\n",
       "\t100002: AddedToken(\"ø\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False),\n",
       "\t100003: AddedToken(\"ö\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False),\n",
       "\t100004: AddedToken(\"ú\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False),\n",
       "\t100005: AddedToken(\"ÿ\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False),\n",
       "\t100006: AddedToken(\"õ\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False),\n",
       "\t100007: AddedToken(\"÷\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False),\n",
       "\t100008: AddedToken(\"û\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False),\n",
       "\t100009: AddedToken(\"ý\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False),\n",
       "\t100010: AddedToken(\"À\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False),\n",
       "\t100011: AddedToken(\"ù\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False),\n",
       "\t100012: AddedToken(\"Á\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False),\n",
       "\t100013: AddedToken(\"þ\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False),\n",
       "\t100014: AddedToken(\"ü\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=False),\n",
       "}"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tokenizer = AutoTokenizer.from_pretrained('../../model/deepseek-ai/deepseek-llm-7b-chat/', use_fast=False, trust_remote_code=True)\n",
    "tokenizer.padding_side = 'right'\n",
    "tokenizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "2503a5fa-9621-4495-9035-8e7ef6525691",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "def process_func(example):\n",
    "    MAX_LENGTH = 384    # Llama分词器会将一个中文字切分为多个token，因此需要放开一些最大长度，保证数据的完整性\n",
    "    input_ids, attention_mask, labels = [], [], []\n",
    "    instruction = tokenizer(f\"User: {example['instruction']+example['input']}\\n\\n\", add_special_tokens=False)  # add_special_tokens 不在开头加 special_tokens\n",
    "    response = tokenizer(f\"Assistant: {example['output']}<｜end▁of▁sentence｜>\", add_special_tokens=False)\n",
    "    input_ids = instruction[\"input_ids\"] + response[\"input_ids\"] + [tokenizer.pad_token_id]\n",
    "    attention_mask = instruction[\"attention_mask\"] + response[\"attention_mask\"] + [1]  # 因为eos token咱们也是要关注的所以 补充为1\n",
    "    labels = [-100] * len(instruction[\"input_ids\"]) + response[\"input_ids\"] + [tokenizer.pad_token_id]  \n",
    "    if len(input_ids) > MAX_LENGTH:  # 做一个截断\n",
    "        input_ids = input_ids[:MAX_LENGTH]\n",
    "        attention_mask = attention_mask[:MAX_LENGTH]\n",
    "        labels = labels[:MAX_LENGTH]\n",
    "    return {\n",
    "        \"input_ids\": input_ids,\n",
    "        \"attention_mask\": attention_mask,\n",
    "        \"labels\": labels\n",
    "    }"
   ]
  },
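  {
   "cell_type": "markdown",
   "id": "1c3e4f5a-6b7c-4890-9c2d-e3f4a5b6c7d8",
   "metadata": {},
   "source": [
    "A minimal consistency check (sketch, not part of the original run): the three returned lists must share one length, and the prompt portion of `labels` should be `-100` so the cross-entropy loss only trains on the response."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2d4f5a6b-7c8d-4901-8d3e-f4a5b6c7d8e9",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Apply the preprocessing to one raw example and check its invariants.\n",
    "sample = process_func(ds[0])\n",
    "assert len(sample['input_ids']) == len(sample['attention_mask']) == len(sample['labels'])\n",
    "sample['labels'][:10]  # expect a run of -100 covering the User prompt"
   ]
  },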
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "84f870d6-73a9-4b0f-8abf-687b32224ad8",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Map:   0%|          | 0/3729 [00:00<?, ? examples/s]"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "                                                                 \r"
     ]
    },
    {
     "data": {
      "text/plain": [
       "Dataset({\n",
       "    features: ['input_ids', 'attention_mask', 'labels'],\n",
       "    num_rows: 3729\n",
       "})"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tokenized_id = ds.map(process_func, remove_columns=ds.column_names)\n",
    "tokenized_id"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "1f7e15a0-4d9a-4935-9861-00cc472654b1",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'User: 小姐，别的秀女都在求中选，唯有咱们小姐想被撂牌子，菩萨一定记得真真儿的——\\n\\nAssistant: 嘘——都说许愿说破是不灵的。<｜end▁of▁sentence｜><｜end▁of▁sentence｜>'"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tokenizer.decode(tokenized_id[0]['input_ids'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "97f16f66-324a-454f-8cc3-ef23b100ecff",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'Assistant: 你们俩话太多了，我该和温太医要一剂药，好好治治你们。<｜end▁of▁sentence｜><｜end▁of▁sentence｜>'"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tokenizer.decode(list(filter(lambda x: x != -100, tokenized_id[1][\"labels\"])))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "424823a8-ed0d-4309-83c8-3f6b1cdf274c",
   "metadata": {},
   "source": [
    "# 创建模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "170764e5-d899-4ef4-8c53-36f6dec0d198",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "The model weights are not tied. Please use the `tie_weights` method before using the `infer_auto_device` function.\n",
      "Loading checkpoint shards: 100%|██████████| 2/2 [00:42<00:00, 21.25s/it]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "LlamaForCausalLM(\n",
       "  (model): LlamaModel(\n",
       "    (embed_tokens): Embedding(102400, 4096)\n",
       "    (layers): ModuleList(\n",
       "      (0-29): 30 x LlamaDecoderLayer(\n",
       "        (self_attn): LlamaAttention(\n",
       "          (q_proj): Linear4bit(in_features=4096, out_features=4096, bias=False)\n",
       "          (k_proj): Linear4bit(in_features=4096, out_features=4096, bias=False)\n",
       "          (v_proj): Linear4bit(in_features=4096, out_features=4096, bias=False)\n",
       "          (o_proj): Linear4bit(in_features=4096, out_features=4096, bias=False)\n",
       "          (rotary_emb): LlamaRotaryEmbedding()\n",
       "        )\n",
       "        (mlp): LlamaMLP(\n",
       "          (gate_proj): Linear4bit(in_features=4096, out_features=11008, bias=False)\n",
       "          (up_proj): Linear4bit(in_features=4096, out_features=11008, bias=False)\n",
       "          (down_proj): Linear4bit(in_features=11008, out_features=4096, bias=False)\n",
       "          (act_fn): SiLUActivation()\n",
       "        )\n",
       "        (input_layernorm): LlamaRMSNorm()\n",
       "        (post_attention_layernorm): LlamaRMSNorm()\n",
       "      )\n",
       "    )\n",
       "    (norm): LlamaRMSNorm()\n",
       "  )\n",
       "  (lm_head): Linear(in_features=4096, out_features=102400, bias=False)\n",
       ")"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import torch\n",
    "\n",
    "model = AutoModelForCausalLM.from_pretrained(\n",
    "        '/root/model/deepseek-ai/deepseek-llm-7b-chat/', \n",
    "        trust_remote_code=True, \n",
    "        torch_dtype=torch.half, \n",
    "        device_map=\"auto\",\n",
    "        low_cpu_mem_usage=True,   # 是否使用低CPU内存\n",
    "        load_in_4bit=True,  # 是否在4位精度下加载模型。如果设置为True，则在4位精度下加载模型。\n",
    "        bnb_4bit_compute_dtype=torch.half,  # 4位精度计算的数据类型。这里设置为torch.half，表示使用半精度浮点数。\n",
    "        bnb_4bit_quant_type=\"nf4\", # 4位精度量化的类型。这里设置为\"nf4\"，表示使用nf4量化类型。\n",
    "        bnb_4bit_use_double_quant=True  # 是否使用双精度量化。如果设置为True，则使用双精度量化。\n",
    "    )\n",
    "model.generation_config = GenerationConfig.from_pretrained('/root/model/deepseek-ai/deepseek-llm-7b-chat/')\n",
    "model.generation_config.pad_token_id = model.generation_config.eos_token_id\n",
    "model"
   ]
  },
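  {
   "cell_type": "markdown",
   "id": "3e5a6b7c-8d9e-4012-9e4f-a5b6c7d8e9f0",
   "metadata": {},
   "source": [
    "An optional check (illustrative addition): `get_memory_footprint()` reports the memory taken by the loaded weights, which confirms whether the 4-bit quantization actually took effect."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4f6b7c8d-9e0f-4123-8f5a-b6c7d8e9f0a1",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Rough footprint of the 4-bit quantized base model, in GB.\n",
    "print(f\"{model.get_memory_footprint() / 1024**3:.2f} GB\")"
   ]
  },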
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "2323eac7-37d5-4288-8bc5-79fac7113402",
   "metadata": {},
   "outputs": [],
   "source": [
    "model.enable_input_require_grads() # 开启梯度检查点时，要执行该方法"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "f808b05c-f2cb-48cf-a80d-0c42be6051c7",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.float16"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model.dtype"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "13d71257-3c1c-4303-8ff8-af161ebc2cf1",
   "metadata": {},
   "source": [
    "# lora "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "2d304ae2-ab60-4080-a80d-19cac2e3ade3",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "LoraConfig(peft_type=<PeftType.LORA: 'LORA'>, auto_mapping=None, base_model_name_or_path=None, revision=None, task_type=<TaskType.CAUSAL_LM: 'CAUSAL_LM'>, inference_mode=False, r=8, target_modules=['q_proj', 'k_proj', 'v_proj', 'o_proj'], lora_alpha=32, lora_dropout=0.1, fan_in_fan_out=False, bias='none', modules_to_save=None, init_lora_weights=True, layers_to_transform=None, layers_pattern=None)"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from peft import LoraConfig, TaskType, get_peft_model\n",
    "\n",
    "config = LoraConfig(\n",
    "    task_type=TaskType.CAUSAL_LM, \n",
    "    target_modules=[\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\"],\n",
    "    inference_mode=False, # 训练模式\n",
    "    r=8, # Lora 秩\n",
    "    lora_alpha=32, # Lora alaph，具体作用参见 Lora 原理\n",
    "    lora_dropout=0.1# Dropout 比例\n",
    ")\n",
    "config"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "2c2489c5-eaab-4e1f-b06a-c3f914b4bf8e",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "LoraConfig(peft_type=<PeftType.LORA: 'LORA'>, auto_mapping=None, base_model_name_or_path='/root/model/deepseek-ai/deepseek-llm-7b-chat/', revision=None, task_type=<TaskType.CAUSAL_LM: 'CAUSAL_LM'>, inference_mode=False, r=8, target_modules=['q_proj', 'k_proj', 'v_proj', 'o_proj'], lora_alpha=32, lora_dropout=0.1, fan_in_fan_out=False, bias='none', modules_to_save=None, init_lora_weights=True, layers_to_transform=None, layers_pattern=None)"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model = get_peft_model(model, config)\n",
    "config"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "ebf5482b-fab9-4eb3-ad88-c116def4be12",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "trainable params: 7,864,320 || all params: 3,882,602,496 || trainable%: 0.20255279823525874\n"
     ]
    }
   ],
   "source": [
    "model.print_trainable_parameters()"
   ]
  },
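  {
   "cell_type": "markdown",
   "id": "5a7c8d9e-0f1a-4234-9a6b-c7d8e9f0a1b2",
   "metadata": {},
   "source": [
    "To see where that trainable 0.2% lives, list a few parameter names (illustrative check): only the injected `lora_A`/`lora_B` matrices on the attention projections should require gradients."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6b8d9e0f-1a2b-4345-8b7c-d8e9f0a1b2c3",
   "metadata": {},
   "outputs": [],
   "source": [
    "# All trainable parameters should be LoRA adapter weights.\n",
    "trainable = [n for n, p in model.named_parameters() if p.requires_grad]\n",
    "trainable[:4]"
   ]
  },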
  {
   "cell_type": "markdown",
   "id": "ca055683-837f-4865-9c57-9164ba60c00f",
   "metadata": {},
   "source": [
    "# 配置训练参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "7e76bbff-15fd-4995-a61d-8364dc5e9ea0",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "args = TrainingArguments(\n",
    "    output_dir=\"./output/DeepSeek\",\n",
    "    per_device_train_batch_size=1,\n",
    "    gradient_accumulation_steps=1,\n",
    "    logging_steps=10,\n",
    "    num_train_epochs=3,\n",
    "    save_steps=100,\n",
    "    learning_rate=1e-4,\n",
    "    save_on_each_node=True,\n",
    "    gradient_checkpointing=True,\n",
    "    optim=\"paged_adamw_32bit\"  # 优化器类型\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "f142cb9c-ad99-48e6-ba86-6df198f9ed96",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "trainer = Trainer(\n",
    "    model=model,\n",
    "    args=args,\n",
    "    train_dataset=tokenized_id,\n",
    "    data_collator=DataCollatorForSeq2Seq(tokenizer=tokenizer, padding=True),\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "aec9bc36-b297-45af-99e1-d4c4d82be081",
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "trainer.train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "4b2cee5d-3d58-4f82-8d26-0eb0158f61f9",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "User: 小姐，别的秀女都在求中选，唯有咱们小姐想被撂牌子，菩萨一定记得真真儿的——\n",
      "\n",
      "Assistant: 姐姐，你别说了，我自有打算。\n"
     ]
    }
   ],
   "source": [
    "text = \"小姐，别的秀女都在求中选，唯有咱们小姐想被撂牌子，菩萨一定记得真真儿的——\"\n",
    "inputs = tokenizer(f\"User: {text}\\n\\n\", return_tensors=\"pt\")\n",
    "outputs = model.generate(**inputs.to(model.device), max_new_tokens=100)\n",
    "\n",
    "result = tokenizer.decode(outputs[0], skip_special_tokens=True)\n",
    "print(result)"
   ]
  }
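  ,
  {
   "cell_type": "markdown",
   "id": "7c9e0f1a-2b3c-4456-9c8d-e9f0a1b2c3d4",
   "metadata": {},
   "source": [
    "A common follow-up step (sketch, not part of the original run): persist the LoRA adapter so it can be re-attached to the base model later. The output path is an assumption for illustration."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8d0f1a2b-3c4d-4567-8d9e-f0a1b2c3d4e5",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Save only the adapter weights (a few MB) plus the tokenizer.\n",
    "# './output/DeepSeek_lora' is an illustrative path, not from the original run.\n",
    "lora_path = './output/DeepSeek_lora'\n",
    "model.save_pretrained(lora_path)\n",
    "tokenizer.save_pretrained(lora_path)"
   ]
  }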
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
