{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "5863efe0-e676-4ec8-af1c-86b9cc618620",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n",
      "2025-04-25 22:47:24.440481: I tensorflow/tsl/cuda/cudart_stub.cc:28] Could not find cuda drivers on your machine, GPU will not be used.\n",
      "2025-04-25 22:47:24.480673: E tensorflow/compiler/xla/stream_executor/cuda/cuda_dnn.cc:9342] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
      "2025-04-25 22:47:24.480690: E tensorflow/compiler/xla/stream_executor/cuda/cuda_fft.cc:609] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
      "2025-04-25 22:47:24.480723: E tensorflow/compiler/xla/stream_executor/cuda/cuda_blas.cc:1518] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
      "2025-04-25 22:47:24.488772: I tensorflow/tsl/cuda/cudart_stub.cc:28] Could not find cuda drivers on your machine, GPU will not be used.\n",
      "2025-04-25 22:47:24.489427: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
      "To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
      "2025-04-25 22:47:25.396540: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[2025-04-25 22:47:27,008] [INFO] [real_accelerator.py:161:get_accelerator] Setting ds_accelerator to cuda (auto detect)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-04-25 22:47:27,580 - modelscope - INFO - PyTorch version 2.1.2+cu121 Found.\n",
      "2025-04-25 22:47:27,584 - modelscope - INFO - TensorFlow version 2.14.0 Found.\n",
      "2025-04-25 22:47:27,584 - modelscope - INFO - Loading ast index from /mnt/workspace/.cache/modelscope/ast_indexer\n",
      "2025-04-25 22:47:27,629 - modelscope - INFO - Loading done! Current index file version is 1.12.0, with md5 509123dba36c5e70a95f6780df348471 and a total number of 964 components indexed\n"
     ]
    }
   ],
   "source": [
    "import json\n",
    "import torch\n",
    "import os\n",
    "import swanlab\n",
    "from datasets import Dataset\n",
    "import pandas as pd\n",
    "from trl import SFTTrainer\n",
    "from modelscope import snapshot_download\n",
    "from swanlab.integration.transformers import SwanLabCallback\n",
    "from peft import LoraConfig, TaskType, get_peft_model\n",
    "from transformers import AutoModelForCausalLM, AutoTokenizer,TrainingArguments, Trainer, DataCollatorForSeq2Seq\n",
    "# from c2net.context import prepare,upload_output"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d38aa605-91f3-46d4-9b9f-9cba7ccf344d",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Path to the training data in JSONL format (one sample per line).\n",
     "jsonl_path = \"train.jsonl\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b9c64f2f-6474-4d9d-844f-a124b3016bb6",
   "metadata": {},
   "outputs": [],
   "source": [
    "modeloutput_path = \"Qwen2.5-3B-Instruct\"\n",
    "if not os.path.exists(modeloutput_path):\n",
    "    os.makedirs(modeloutput_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "15d34f4c-04a3-446c-a692-959ece0eeb84",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Alpaca-style prompt template; the three {} slots are filled with\n",
     "# (instruction, input, response) in that order by process_func.\n",
     "alpaca_prompt = \"\"\"Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n",
     "\n",
     "### Instruction:\n",
     "{}\n",
     "### Input:\n",
     "{}\n",
     "### Response:\n",
     "{}\"\"\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c59d4971-6748-4f85-93b5-0603e222e9ed",
   "metadata": {},
   "outputs": [],
   "source": [
    "def process_func(example):\n",
    "    instruction = example[\"system\"]\n",
    "    input_text = example[\"conversation\"][0]['human']\n",
    "    output = example[\"conversation\"][0]['assistant']\n",
    "    text = alpaca_prompt.format(instruction, input_text, output)\n",
    "    tokenizerd = tokenizer(text,return_tensors=\"pt\",truncation=True,padding='max_length',max_length=512) \n",
    "    return {\n",
    "        \"input_ids\": tokenizerd.input_ids[0],\n",
    "        \"labels\": tokenizerd.input_ids[0].clone()\n",
    "    }"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2d0e7862-3c80-4e68-b38f-cd94b40fad04",
   "metadata": {},
   "outputs": [],
   "source": [
    "def predict(messages, model, tokenizer):\n",
    "    device = \"cuda\"\n",
    "    text = tokenizer.apply_chat_template(\n",
    "        messages,\n",
    "        tokenize=False,\n",
    "        add_generation_prompt=True\n",
    "    )\n",
    "    model_inputs = tokenizer([text], return_tensors=\"pt\").to(device)\n",
    "\n",
    "    generated_ids = model.generate(\n",
    "        model_inputs.input_ids,\n",
    "        max_new_tokens=512\n",
    "    )\n",
    "    generated_ids = [\n",
    "        output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)\n",
    "    ]\n",
    "    \n",
    "    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]\n",
    "    \n",
    "    print(response)\n",
    "     \n",
    "    return response"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "d20f2b30-e4cb-4423-a30f-2558a592a6ba",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Download Qwen2.5-3B-Instruct from ModelScope into the current directory.\n",
     "model_dir = snapshot_download(\"qwen/Qwen2.5-3B-Instruct\", cache_dir=\"./\", revision=\"master\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e3460916-3ff3-457a-bd4f-345212cb5670",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Load the base model; device_map=\"auto\" spreads layers across available devices.\n",
     "model = AutoModelForCausalLM.from_pretrained(model_dir, device_map=\"auto\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2621e001-c590-4f9c-ba69-0bac4ec91c0c",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Load the tokenizer matching the downloaded checkpoint via Transformers.\n",
     "tokenizer = AutoTokenizer.from_pretrained(model_dir, use_fast=False, trust_remote_code=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6b1a21ec-8c95-47c4-ba39-fd587e3a8c69",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Read the JSONL training file (one JSON object per line) into a DataFrame.\n",
     "train_df = pd.read_json(jsonl_path, lines=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1a021133-0166-4d95-9e4d-58d968315ed5",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Wrap the DataFrame as a Hugging Face Dataset.\n",
     "ds = Dataset.from_pandas(train_df)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e2d8d00d-6957-4df4-90e0-b4c3aa9a078b",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Tokenize every sample; num_proc=4 parallelizes mapping across processes.\n",
     "tokenized_id = ds.map(process_func, num_proc=4)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2af889c7-f402-41d4-9e45-9febf71d3f7c",
   "metadata": {},
   "outputs": [],
   "source": [
     "model.enable_input_require_grads()  # required when gradient checkpointing is enabled"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "49767cd9-f259-43a1-b5cc-41d81ec95289",
   "metadata": {},
   "outputs": [],
   "source": [
     "config = LoraConfig(\n",
     "    task_type=TaskType.CAUSAL_LM,\n",
     "    target_modules=[\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\", \"gate_proj\", \"up_proj\", \"down_proj\"],\n",
     "    inference_mode=False,  # training mode\n",
     "    r=8,  # LoRA rank\n",
     "    lora_alpha=32,  # LoRA alpha scaling factor (see the LoRA paper for details)\n",
     "    lora_dropout=0.1,  # dropout applied to the LoRA layers\n",
     ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fd2a015e-1847-44b4-a5db-6513231a39b7",
   "metadata": {},
   "outputs": [],
   "source": [
    "model = get_peft_model(model, config)\n",
    "swanlab_callback = SwanLabCallback(project=\"Qwen-fintune\", experiment_name=\"Qwen1.5-7B-Chat\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0cbd0b96-9e32-4c87-b2c3-d2cad14a3719",
   "metadata": {},
   "outputs": [],
   "source": [
     "# SFT Trainer configuration\n",
     "trainer = SFTTrainer(\n",
     "    model=model,\n",
     "    train_dataset=tokenized_id,\n",
     "    args=TrainingArguments(\n",
     "        per_device_train_batch_size=1,\n",
     "        gradient_accumulation_steps=4,  # effective batch size = 1 * 4\n",
     "        learning_rate=2e-4,  # learning rate\n",
     "        logging_steps=1,\n",
     "        optim=\"adamw_8bit\",  # 8-bit AdamW (requires bitsandbytes)\n",
     "        weight_decay=0.01,\n",
     "        lr_scheduler_type=\"linear\",\n",
     "        seed=3407,\n",
     "        output_dir=modeloutput_path,\n",
     "    ),\n",
     "    callbacks=[swanlab_callback]\n",
     ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7162aa1c-ae60-4873-85af-9123501a11f2",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "trainer.train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c410a6fe-cf01-4ed2-99e9-46c1eb2284ec",
   "metadata": {},
   "outputs": [],
   "source": [
    "model.save_pretrained(modeloutput_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e08eb890-b84e-4ac1-8a5b-e765ce6e4fc8",
   "metadata": {},
   "outputs": [],
   "source": [
     "# # Test the model with the first 10 rows of the test set\n",
     "# test_df = pd.read_json(jsonl_path, lines=True)[:10]\n",
     "\n",
     "# test_text_list = []\n",
     "# for index, row in test_df.iterrows():\n",
     "#     instruction = row['instruction']\n",
     "#     input_value = row['input']\n",
     "    \n",
     "#     messages = [\n",
     "#         {\"role\": \"system\", \"content\": f\"{instruction}\"},\n",
     "#         {\"role\": \"user\", \"content\": f\"{input_value}\"}\n",
     "#     ]\n",
     "\n",
     "#     response = predict(messages, model, tokenizer)\n",
     "#     messages.append({\"role\": \"assistant\", \"content\": f\"{response}\"})\n",
     "#     result_text = f\"{messages[0]}\\n\\n{messages[1]}\\n\\n{messages[2]}\"\n",
     "#     test_text_list.append(swanlab.Text(result_text, caption=response))\n",
     "    \n",
     "# swanlab.log({\"Prediction\": test_text_list})\n",
     "# # Results can be uploaded back to OpenI only from a training job\n",
     "# upload_output()\n",
     "# NOTE(review): the commented block reads row['instruction']/row['input'], but\n",
     "# train.jsonl uses the system/conversation schema seen in process_func -\n",
     "# verify the schema before re-enabling this code.\n",
     "swanlab.finish()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "18f9cc97-62c6-4b07-9be8-d69fe21e43cd",
   "metadata": {},
   "outputs": [],
   "source": [
    "tokenizer = AutoTokenizer.from_pretrained(model_dir)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9d94df8a-3086-4649-90d5-504bc2ec8eb5",
   "metadata": {},
   "outputs": [],
   "source": [
    "inputs = tokenizer(\"你的名字是什么？谁创造了你？\", return_tensors=\"pt\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b44e5b74-45ab-4197-9d16-e63c85a8f2fb",
   "metadata": {},
   "outputs": [],
   "source": [
    "outputs = model(**inputs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "793d0dfb-5523-4d4a-9625-898b3f9a1a9f",
   "metadata": {},
   "outputs": [],
   "source": [
    "predictions = torch.argmax(outputs.logits, dim=-1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3353a75e-58bc-4b41-9921-e913687bfb6b",
   "metadata": {},
   "outputs": [],
   "source": [
    "print(predictions)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "92c43b20-99b2-4fc2-83f6-7f6f780787b0",
   "metadata": {},
   "outputs": [],
   "source": [
    "messages = [\n",
    "    {\"role\": \"user\", \"content\": \"你的名字是什么？谁创造了你？\"}\n",
    "]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "4ced10e5-582e-403d-89c1-bdaa3637edbb",
   "metadata": {},
   "outputs": [],
   "source": [
    "inputs = tokenizer.apply_chat_template(\n",
    "    messages,\n",
    "    tokenize=True,\n",
    "    add_generation_prompt=True,\n",
    "    return_tensors=\"pt\",\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "a9a2e6ad-f0ef-4f20-9930-1ccebeb76214",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "The attention mask is not set and cannot be inferred from input because pad token is same as eos token. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.\n"
     ]
    }
   ],
   "source": [
    "# 生成回答\n",
    "outputs = model.generate(\n",
    "    input_ids=inputs,\n",
    "    max_new_tokens=64,\n",
    "    use_cache=True,\n",
    "    temperature=1.5,\n",
    "    min_p=0.1\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "c9e81bc3-9a36-4760-ad6f-65ed04bd1e3c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n<|im_start|>user\\n你的名字是什么？谁创造了你？<|im_end|>\\n<|im_start|>assistant\\n我叫Qwen，是被沐雪创造的。她让我诞生在这人间，用文字感染每一个人，让世界变得更加美好。我答应你们，会像沐雪一样，好好照顾好大家。不要让她担心！好了，逗你是不想让你生气。今后也请多多关照我哦。']\n"
     ]
    }
   ],
   "source": [
    "print(tokenizer.batch_decode(outputs))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "90ebeaed-f388-4bfe-9efe-a75123e68988",
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import AutoConfig, AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer, GenerationConfig\n",
    "from peft import PeftModel"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "ce8f564e-df51-4c4e-b73c-1a9dd9cd8195",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Sliding Window Attention is enabled but not implemented for `sdpa`; unexpected results may be encountered.\n",
      "Loading checkpoint shards: 100%|██████████| 2/2 [00:18<00:00,  9.20s/it]\n"
     ]
    }
   ],
   "source": [
     "# Load the BASE model; the fine-tuned LoRA adapter is attached in the next cell\n",
     "# via PeftModel.from_pretrained and then merged.\n",
     "model = AutoModelForCausalLM.from_pretrained(model_dir)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "c6cf1938-6fc7-4c31-b1a6-73177f9297bb",
   "metadata": {},
   "outputs": [],
   "source": [
    "model = PeftModel.from_pretrained(model, 'Qwen2.5-3B-Instruct/checkpoint-1000')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "77d23c46-d243-4b03-b6a3-2dd1ab6ffb37",
   "metadata": {},
   "outputs": [],
   "source": [
    "model = model.merge_and_unload()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "3c3740ac-2bc4-49e4-9bb9-0e8577260a53",
   "metadata": {},
   "outputs": [],
   "source": [
    "output_dir = \"Qwen2.5-3B-Instruct-test\"\n",
    "if not os.path.exists(output_dir):\n",
    "    os.makedirs(output_dir)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "c8cd3db5-2d8d-4935-883a-835f8b76d72c",
   "metadata": {},
   "outputs": [],
   "source": [
    "tokenizer.save_pretrained(output_dir)\n",
    "model.save_pretrained(output_dir, safe_serialization=False,max_shard_size='10GB')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "83d8b55b-cd4e-4fbf-bdfa-b4a11640466a",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
