{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a31cd748-d78f-45e4-8be2-da537ff18de7",
   "metadata": {},
   "outputs": [],
   "source": [
    "import time\n",
    "import datasets\n",
    "from dataclasses import dataclass\n",
    "from transformers import AutoModelForCausalLM, AutoTokenizer\n",
    "import torch\n",
    "import numpy as np"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "83f7225f-b05d-428d-8606-23069c6f78ec",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Device and checkpoint configuration (assumes a CUDA-capable GPU)\n",
    "device = \"cuda\"  # the device to load the model onto\n",
    "model_path = \"./Qwen2.5-0.5B\"  # local directory holding the base Qwen2.5-0.5B checkpoint"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "42f129f8-0bc5-4b29-94ec-71ba0b51d598",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the causal-LM weights from the local checkpoint.\n",
    "# NOTE(review): `dtype='auto'` is the keyword accepted by recent transformers\n",
    "# releases (older versions call it `torch_dtype`) -- confirm against the\n",
    "# installed transformers version.\n",
    "model = AutoModelForCausalLM.from_pretrained(\n",
    "    model_path,\n",
    "    dtype='auto',\n",
    "    device_map='auto'\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c1c57c03-0b6a-43f3-b3a1-12d14dedb7b6",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Tokenizer from the same checkpoint -- supplies the Qwen chat template used below\n",
    "tokenizer = AutoTokenizer.from_pretrained(model_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "aaeb66e5-a7c1-43ff-a745-fcabf3beb92e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sampling configuration for generation.\n",
    "# NOTE(review): the hard-coded IDs are Qwen2.5 special tokens -- presumably\n",
    "# 151645 = <|im_end|> and 151643 = <|endoftext|>. 151643 doubles as the pad\n",
    "# token and is reused as the right-padding value in the training loop below;\n",
    "# confirm against tokenizer.special_tokens_map.\n",
    "model.generation_config.do_sample = True\n",
    "model.generation_config.eos_token_id = [151645, 151643]\n",
    "model.generation_config.pad_token_id = 151643\n",
    "model.generation_config.temperature = 0.7\n",
    "model.generation_config.top_p = 0.8\n",
    "model.generation_config.top_k = 20\n",
    "model.generation_config.repetition_penalty = 1.05"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "30361547-0a40-4493-9cfd-16aab5e1e780",
   "metadata": {},
   "outputs": [],
   "source": [
    "@dataclass\n",
    "class SFTConfig:\n",
    "    \"\"\"Hyperparameters for the supervised fine-tuning run.\"\"\"\n",
    "    max_length: int = 2500                  # truncation length (tokens) per conversation\n",
    "    batch_size: int = 2                     # sequences per forward pass\n",
    "    gradient_accumulation_steps: int = 8    # optimizer step every N batches (effective batch = 16)\n",
    "    log_iter: int = 400                     # log the window-average loss every N batches\n",
    "    max_lr: float = 2e-5                    # peak learning rate reached after warmup\n",
    "    min_lr: float = 2e-6                    # floor of the cosine decay\n",
    "    warmup_steps: int = 1000                # linear warmup duration (batches)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "86eed066-0e78-476d-94b6-e33c4ceb917b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load a local copy of the UltraChat-200k dataset\n",
    "ultrachat_200k_data = datasets.load_dataset('./ultrachat_200k')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "69bca6ea-44b9-42d9-9728-3fed53aa7944",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Inspect the available splits (the 'train_sft' split is used below)\n",
    "ultrachat_200k_data.keys()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "325c6459-eff9-47b8-a2a2-fa225b2e4445",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Peek at one raw record to see the 'messages' structure\n",
    "ultrachat_200k_data['train_sft'][0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "772359ed-fdf9-4b5a-bb2d-54a6ebf45779",
   "metadata": {},
   "outputs": [],
   "source": [
    "def tokenize_and_format(data):\n",
    "    \"\"\"Apply the tokenizer's chat template to a list of chat messages.\n",
    "\n",
    "    Returns token ids truncated to SFTConfig.max_length. No generation\n",
    "    prompt is appended because the targets are complete conversations.\n",
    "    \"\"\"\n",
    "    return tokenizer.apply_chat_template(\n",
    "        data,\n",
    "        tokenize=True,\n",
    "        add_generation_prompt=False,\n",
    "        truncation=True,\n",
    "        max_length=SFTConfig.max_length,\n",
    "    )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1b32cd1f-3b32-48d4-aba9-25b3a647729a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Smoke test: tokenize the first conversation in the training split\n",
    "input_ids = tokenize_and_format(ultrachat_200k_data['train_sft'][0]['messages'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ee668e87-ad08-4918-aa96-79ca3b147de0",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Round-trip check: the decoded text should show the chat-template markers\n",
    "print(tokenizer.decode(input_ids))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3bf0acaa-cb10-479d-b5ca-5e28e3de52f4",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the token-id training set (first 50k conversations).\n",
    "chosen_input_ids_list = []\n",
    "i = 0\n",
    "while True:\n",
    "    data = ultrachat_200k_data['train_sft'][i]['messages']\n",
    "    # Prepend a fixed system prompt to every conversation\n",
    "    data.insert(\n",
    "        0, {\"content\": \"You are a helpful assistant\", \"role\": \"system\"})\n",
    "    input_ids = tokenize_and_format(data)\n",
    "    chosen_input_ids_list.append(input_ids)\n",
    "    i += 1\n",
    "    if i % 1000 == 0:\n",
    "        print(f\"已处理{i}条数据\")  # progress: i records processed so far\n",
    "    if i == 50000:  # cap at 50k; use len(ultrachat_200k_data['train_sft']) for the full split\n",
    "        break"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "88537750-ed5c-4132-abaa-7a924cca0b3a",
   "metadata": {},
   "source": [
    "# 使用设置的训练超参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7805f6c2-e41f-4ea2-870f-36c955da1b49",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Materialize hyperparameters as module-level names and build the optimizer.\n",
    "batch_size = SFTConfig.batch_size\n",
    "gradient_accumulation_steps = SFTConfig.gradient_accumulation_steps\n",
    "log_iter = SFTConfig.log_iter\n",
    "max_lr = SFTConfig.max_lr\n",
    "min_lr = SFTConfig.min_lr\n",
    "warmup_steps = SFTConfig.warmup_steps\n",
    "total_steps = len(chosen_input_ids_list)//batch_size  # batches in one pass over the data\n",
    "optimizer = torch.optim.AdamW(\n",
    "    filter(lambda p: p.requires_grad, model.parameters()), lr=max_lr)\n",
    "trainable_parameters_num = sum(p.numel() for p in filter(\n",
    "    lambda p: p.requires_grad, model.parameters()))  # full-parameter fine-tuning"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "354e5ab8-7e10-4c76-8a86-f82f48d3aac2",
   "metadata": {},
   "source": [
    "# 配置logging日志记录模型训练过程"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "dd57185d-85c1-45bc-972c-802b173b5984",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Configure logging: write the run's hyperparameters once at startup.\n",
    "with open(\"./Qwen2.5-0.5B-SFT_log.txt\", \"a\") as my_file:\n",
    "    my_file.write(f'time:{time.strftime(\"%Y-%m-%d, %H:%M:%S\")}, batch_size:{batch_size}, trainable_parameters_num:{trainable_parameters_num}, warmup_steps:{warmup_steps}, max_lr:{max_lr}, min_lr:{min_lr}\\n')\n",
    "\n",
    "\n",
    "def log_call(iters, iters_average_loss):\n",
    "    \"\"\"Append one progress line (timestamp, iteration, mean loss) to the run log.\"\"\"\n",
    "    # Bug fix: the original referenced an undefined `model_name` here, which\n",
    "    # would raise NameError on the first logging call during training; use\n",
    "    # the same literal path as the header write above.\n",
    "    with open(\"./Qwen2.5-0.5B-SFT_log.txt\", \"a\") as my_file:\n",
    "        my_file.write(\n",
    "            f'time:{time.strftime(\"%Y-%m-%d, %H:%M:%S\")}, iters:{iters+1}, iters_average_Loss:{iters_average_loss:.4f}\\n')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "48ca8382-e11c-4f8d-afa9-d9e690713f65",
   "metadata": {},
   "source": [
    "# 学习率设置：余弦衰减学习率"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9bc90b01-c727-46a3-8b17-c077c9a3d25a",
   "metadata": {},
   "outputs": [],
   "source": [
    "def linear_warmup(current_step, warmup_steps, max_lr):\n",
    "    \"\"\"Linearly ramp the LR from 0 to max_lr over warmup_steps, then hold at max_lr.\"\"\"\n",
    "    if current_step < warmup_steps:\n",
    "        return max_lr * current_step / warmup_steps\n",
    "    else:\n",
    "        return max_lr\n",
    "\n",
    "\n",
    "def cosine_decay(current_step, warmup_steps, total_steps, max_lr, min_lr):\n",
    "    \"\"\"Cosine LR decay (max_lr -> min_lr) after a linear warmup phase.\n",
    "\n",
    "    Robustness fixes: guards the division when total_steps <= warmup_steps,\n",
    "    and clamps progress to 1.0 so steps past total_steps stay at min_lr\n",
    "    (previously cos(pi * progress) rises again for progress > 1, letting\n",
    "    the LR drift back above min_lr).\n",
    "    \"\"\"\n",
    "    if current_step < warmup_steps:\n",
    "        return linear_warmup(current_step, warmup_steps, max_lr)\n",
    "    else:\n",
    "        denom = max(total_steps - warmup_steps, 1)\n",
    "        progress = min((current_step - warmup_steps) / denom, 1.0)\n",
    "        decay = 0.5 * (1 + np.cos(np.pi * progress))\n",
    "        return (max_lr - min_lr) * decay + min_lr"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "51b35621-cd2a-41f4-b162-7649fdbc0c57",
   "metadata": {},
   "source": [
    "# 掩码设置\n",
    "\n",
    "- 基于指令的SFT和预训练的区别核心就是掩码掉“问题”部分的损失，而 **只看“回答”部分的损失** ，并仅基于回答部分的损失进行优化\n",
    "- 实现方式： **构造损失掩码** ，仅针对每轮对话（含多轮）的模型“输出”部分（也就是回答部分）进行损失计算\n",
    "\n",
    "设置问题部分的掩码函数，用于执行仅针对回答部分才计算损失"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8f999f57-2861-479f-9212-04fc321423ad",
   "metadata": {},
   "outputs": [],
   "source": [
    "def return_answer_mask(input_ids):\n",
    "    \"\"\"Build a 0/1 mask over a padded batch marking assistant-answer positions.\n",
    "\n",
    "    input_ids: 2-D tensor of token ids (rows are sequences). Positions that\n",
    "    belong to assistant replies are set to 1 so the loss is computed only on\n",
    "    answers. Template-specific: relies on Qwen's <|im_end|>-terminated turns\n",
    "    and on every conversation starting with a system message (inserted during\n",
    "    data prep above).\n",
    "    \"\"\"\n",
    "    assistant_answer_mask = torch.zeros_like(input_ids)  # zero-initialized mask\n",
    "    for i in range(input_ids.shape[0]):\n",
    "        # End positions of user turns: the token after <|im_end|> (the\n",
    "        # template's trailing newline), hence the +1. With a leading system\n",
    "        # message, <|im_end|> occurrences alternate system/user/assistant/...,\n",
    "        # so [1::2] selects the user turns. Adjust for other chat templates;\n",
    "        # see https://huggingface.co/blog/chat-templates\n",
    "        i_user_end_list = [\n",
    "            i+1 for i in torch.where(input_ids[i] == tokenizer.encode('<|im_end|>')[0])[0].tolist()[1::2]]\n",
    "        # End positions of assistant turns ([2::2]); same +1 newline logic.\n",
    "        # Template-specific -- adjust for other chat templates.\n",
    "        i_assistant_end_list = [\n",
    "            i+1 for i in torch.where(input_ids[i] == tokenizer.encode('<|im_end|>')[0])[0].tolist()[2::2]]\n",
    "\n",
    "        if len(i_user_end_list) == len(i_assistant_end_list):\n",
    "            for user_end, assistant_end in zip(i_user_end_list, i_assistant_end_list):\n",
    "                # +3 presumably skips the assistant-turn header tokens --\n",
    "                # template-specific, adjust for other chat templates\n",
    "                assistant_answer_mask[i][user_end+3:assistant_end-1] = 1\n",
    "        elif len(i_user_end_list) == len(i_assistant_end_list)+1 == 1:  # single turn whose answer was cut off by truncation\n",
    "            # Right padding also gets marked 1 here; the padding mask applied\n",
    "            # later in the training loop filters those positions out.\n",
    "            assistant_answer_mask[i][i_user_end_list[0]+3:] = 1\n",
    "        elif len(i_user_end_list) == len(i_assistant_end_list)+1:  # multi-turn with a truncated final answer\n",
    "            assistant_answer_mask[i][i_user_end_list[-1]+3:] = 1\n",
    "            for user_end, assistant_end in zip(i_user_end_list[:-1], i_assistant_end_list):\n",
    "                assistant_answer_mask[i][user_end+3:assistant_end-1] = 1\n",
    "        else:\n",
    "            continue  # unexpected layout: leave this row all zeros and move on\n",
    "    return assistant_answer_mask"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "608f3c7b-452b-4485-a1fb-a25210c4d8d2",
   "metadata": {},
   "source": [
    "# 开启SFT微调训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "19a442cd-18fc-4d0b-b1ab-c84a8f504e3d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Prepare for training: train mode, clear any stale gradients, reset counters.\n",
    "model.train()\n",
    "train_loss_list = []\n",
    "model.zero_grad()  # clear gradients before training starts\n",
    "ignore_iters_count = 0  # batches skipped because some row had no answer tokens"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6d203a72-454a-44df-b3b7-7f00c50993f2",
   "metadata": {},
   "outputs": [],
   "source": [
    "for iters in range(len(chosen_input_ids_list)//batch_size):\n",
    "    # Slice out this batch's token-id lists\n",
    "    chosen_batch_inputids = chosen_input_ids_list[iters * batch_size:(iters+1)*batch_size]\n",
    "\n",
    "    # Longest sequence in the batch: everything is padded up to this length\n",
    "    chosen_max_dim = max([len(i) for i in chosen_batch_inputids])\n",
    "\n",
    "    # Right-pad with the pad/eos token so the batch forms a rectangular tensor\n",
    "    chosen_batch_inputids_padding_list = []\n",
    "    for i in range(batch_size):\n",
    "        chosen_batch_inputids_padding_list.append(torch.nn.functional.pad(torch.tensor(chosen_batch_inputids[i]), (\n",
    "            # pad on the right\n",
    "            0, chosen_max_dim - len(chosen_batch_inputids[i])), mode='constant', value=model.generation_config.eos_token_id[-1]).tolist())\n",
    "    chosen_batch_inputids_tensor = torch.tensor(\n",
    "        chosen_batch_inputids_padding_list)\n",
    "\n",
    "    # Next-token prediction: y is x shifted left by one position\n",
    "    chosen_x = chosen_batch_inputids_tensor[:, :-1].to(device)\n",
    "    chosen_y = chosen_batch_inputids_tensor[:, 1:].to(device)\n",
    "\n",
    "    # Build the loss masks (padding mask & answer mask): the padding mask\n",
    "    # excludes padded tokens from the loss; the answer mask keeps only the\n",
    "    # assistant-answer tokens. Together they decide which logits to ignore.\n",
    "    # [padding mask] NOTE(review): every position equal to the pad id is\n",
    "    # dropped -- a genuine eos token sharing that id is dropped too; confirm\n",
    "    # that is intended.\n",
    "    chosen_padding_mask = torch.where(\n",
    "        chosen_y == model.generation_config.eos_token_id[-1], 0, 1)\n",
    "    # [answer mask]\n",
    "    chosen_assistant_answer_mask = return_answer_mask(chosen_x)\n",
    "    # [padding mask & answer mask]: only answer-part losses are kept\n",
    "    chosen_assistant_answer_mask = (\n",
    "        chosen_assistant_answer_mask & chosen_padding_mask)\n",
    "\n",
    "    # Skip batches where truncation left some row with no answer tokens at all\n",
    "    # (the question was so long the answer never made it into the window)\n",
    "    if chosen_assistant_answer_mask.sum(dim=-1).min().item() == 0:\n",
    "        # print(f'skipping batch {iters+1}')\n",
    "        ignore_iters_count += 1\n",
    "        continue  # move on to the next batch\n",
    "\n",
    "    # Forward pass to get next-token logits\n",
    "    chosen_logits = model(chosen_x).logits\n",
    "    torch.cuda.empty_cache()  # release unused cached GPU memory (slows training down)\n",
    "    torch.cuda.ipc_collect()\n",
    "\n",
    "    # Per-sequence mean NLL over answer tokens; batch_loss shape: [batch_size].\n",
    "    # Bug fix: torch.log_softmax replaces torch.log(torch.softmax(...)).\n",
    "    # The composed form underflows to log(0) = -inf for tiny probabilities\n",
    "    # (especially in reduced precision); log_softmax is the mathematically\n",
    "    # identical, numerically stable equivalent.\n",
    "    batch_loss = torch.mul((torch.gather(torch.log_softmax(chosen_logits, dim=-1), dim=-1, index=chosen_y.unsqueeze(2))\n",
    "                           * (-1)).squeeze(2), chosen_assistant_answer_mask).sum(dim=-1) / chosen_assistant_answer_mask.sum(dim=-1)\n",
    "\n",
    "    # Final scalar loss, scaled down for gradient accumulation\n",
    "    loss = torch.nanmean(batch_loss)/(gradient_accumulation_steps)\n",
    "\n",
    "    loss.backward()  # backprop; gradients accumulate across iterations\n",
    "\n",
    "    # Learning rate for the current step\n",
    "    lr = cosine_decay(iters, warmup_steps, total_steps, max_lr, min_lr)\n",
    "\n",
    "    # Push the new learning rate into the AdamW optimizer\n",
    "    for param_group in optimizer.param_groups:\n",
    "        param_group['lr'] = lr\n",
    "\n",
    "    if (iters+1) % gradient_accumulation_steps == 0 or (iters+1) == (len(chosen_input_ids_list)//batch_size):\n",
    "        optimizer.step()  # apply accumulated gradients\n",
    "        # reset gradients\n",
    "        optimizer.zero_grad()\n",
    "\n",
    "    train_loss_list.append(loss.item()*gradient_accumulation_steps)\n",
    "\n",
    "    if (iters+1) % log_iter == 0 or (iters+1) == (len(chosen_input_ids_list)//batch_size):\n",
    "        # np.nanmean guards the window average against NaN entries\n",
    "        print(\n",
    "            f'time:{time.strftime(\"%Y-%m-%d, %H:%M:%S\")}, iters:{iters+1}, last_{log_iter}_iters_average_train_Loss:{np.nanmean(train_loss_list[-log_iter:]):.4f}')\n",
    "        log_call(iters, np.nanmean(train_loss_list[-log_iter:]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "156cbd00-b781-497e-863a-3a0388251a4d",
   "metadata": {},
   "outputs": [],
   "source": [
    "print(\"训练完成\")\n",
    "print(f'共计忽略{ignore_iters_count}个批次数据')\n",
    "# Persist the fine-tuned weights and the tokenizer side by side so the\n",
    "# output directory is directly loadable with from_pretrained\n",
    "model.save_pretrained(\"./Qwen2.5-0.5B-SFT/\")\n",
    "tokenizer.save_pretrained(\"./Qwen2.5-0.5B-SFT/\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
