{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "7fc27612-b057-4306-960c-5f9edbc335fa",
   "metadata": {},
   "source": [
    "到24年Q4季度，微调大模型，已经变成了非常容易的一个事情，可以在T4，V100等卡上微调大模型。\n",
    "\n",
    "最简单的微调工具包：https://github.com/hiyouga/LLaMA-Factory\n",
    "可以通过配置数据，模型，存储路径等的yaml，通过命令行一键启动训练。\n",
    "\n",
    "llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml\n",
    "\n",
    "后文为unsloth训练qwen示例\n",
    "\n",
    "https://colab.research.google.com/drive/1mvwsIQWDs2EdZxZQF9pRGnnOvE86MVvR?usp=sharing#scrollTo=2eSvM9zX_2d3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7f6acc1f-b1cb-4449-80d2-4067d2b86fac",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load Qwen2.5-7B-Instruct in 4-bit via Unsloth, then attach LoRA adapters.\n",
    "from unsloth import FastLanguageModel\n",
    "import torch\n",
    "\n",
    "model, tokenizer = FastLanguageModel.from_pretrained(\n",
    "    model_name = \"unsloth/Qwen2.5-7B-Instruct\",\n",
    "    max_seq_length = 10000,  # training context window; must fit document + question + answer\n",
    "    dtype = None,  # None = let Unsloth auto-detect (bf16 where supported, else fp16)\n",
    "    load_in_4bit = True,  # 4-bit quantization so the 7B model fits on a T4/V100\n",
    ")\n",
    "\n",
    "# Wrap with LoRA: only the low-rank adapter matrices are trained, the base\n",
    "# weights stay frozen — this is what makes small-GPU fine-tuning feasible.\n",
    "model = FastLanguageModel.get_peft_model(\n",
    "    model,\n",
    "    r = 16,  # LoRA rank\n",
    "    target_modules = [\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\",\n",
    "                      \"gate_proj\", \"up_proj\", \"down_proj\",],\n",
    "    lora_alpha = 16,  # LoRA scaling factor (alpha / r = 1 here)\n",
    "    lora_dropout = 0,\n",
    "    bias = \"none\",  # bias terms are not trained\n",
    "    use_gradient_checkpointing = \"unsloth\",  # Unsloth's memory-saving checkpointing variant\n",
    "    random_state = 3407,\n",
    "    use_rslora = False,  # rank-stabilized LoRA disabled\n",
    "    loftq_config = None,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c483c806-0017-4702-820f-3a84c6c763f7",
   "metadata": {},
   "outputs": [],
   "source": [
    "rag_prompt = \"\"\"DOCUMENT：  \n",
    "{}\n",
    "\n",
    "QUESTION：  \n",
    "{}\n",
    "\n",
    "INSTRUCTIONS：  \n",
    "使用上述文档内容回答用户的问题, 确保你的回答基于文档中的事实。  \n",
    "如果文档中没有足够的事实来回答该问题，请返回 “抱歉，我不知道”\"\"\"\n",
    "\n",
    "# NOTE(review): EOS_TOKEN is never appended below — apply_chat_template\n",
    "# already terminates each turn with the template's end-of-turn token.\n",
    "# Kept only so later cells relying on the name still work.\n",
    "EOS_TOKEN = tokenizer.eos_token\n",
    "\n",
    "def formatting_prompts_func(examples):\n",
    "    \"\"\"Render batched (context, query, answer) rows into chat-formatted\n",
    "    training strings using the tokenizer's chat template.\n",
    "\n",
    "    Args:\n",
    "        examples: batched dataset columns keyed \"context\", \"query\", \"answer\".\n",
    "\n",
    "    Returns:\n",
    "        Dict with a single \"text\" key holding the rendered training strings.\n",
    "    \"\"\"\n",
    "    contexts = examples[\"context\"]\n",
    "    queries  = examples[\"query\"]\n",
    "    answers  = examples[\"answer\"]\n",
    "\n",
    "    texts = []\n",
    "    # Loop vars renamed from the original (`input`/`output`): `input`\n",
    "    # shadowed the Python builtin of the same name.\n",
    "    for docs, question, answer in zip(contexts, queries, answers):\n",
    "        user_content = rag_prompt.format(docs, question)\n",
    "\n",
    "        messages = [\n",
    "            {\"role\": \"system\", \"content\": \"You are Qwen, created by Alibaba Cloud. You are a helpful assistant.\"},\n",
    "            {\"role\": \"user\", \"content\": user_content},\n",
    "            {\"role\": \"assistant\", \"content\": answer}\n",
    "        ]\n",
    "\n",
    "        # add_generation_prompt=False: the assistant turn is already present\n",
    "        # in `messages`, so no trailing generation header is appended.\n",
    "        text = tokenizer.apply_chat_template(\n",
    "            messages,\n",
    "            tokenize=False,\n",
    "            add_generation_prompt=False\n",
    "        )\n",
    "        texts.append(text)\n",
    "\n",
    "    return {\"text\": texts}\n",
    "\n",
    "from datasets import load_dataset\n",
    "dataset = load_dataset('csv', data_files='./sft_data.csv', split='train')\n",
    "dataset = dataset.map(formatting_prompts_func, batched=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5f915a64-2f1e-4552-9ed2-c82188b10767",
   "metadata": {},
   "outputs": [],
   "source": [
    "from trl import SFTTrainer\n",
    "from transformers import TrainingArguments\n",
    "from unsloth import is_bfloat16_supported\n",
    "\n",
    "# Supervised fine-tuning over the rendered \"text\" column from the previous cell.\n",
    "trainer = SFTTrainer(\n",
    "    model = model,\n",
    "    tokenizer = tokenizer,\n",
    "    train_dataset = dataset,\n",
    "    dataset_text_field = \"text\",  # column produced by formatting_prompts_func\n",
    "    max_seq_length = 10000,  # keep equal to the model's max_seq_length above\n",
    "    dataset_num_proc = 2,  # parallel workers for dataset preprocessing\n",
    "    args = TrainingArguments(\n",
    "        per_device_train_batch_size = 2,\n",
    "        gradient_accumulation_steps = 4,  # effective batch size = 2 * 4 = 8\n",
    "\n",
    "        warmup_steps = 20,\n",
    "        num_train_epochs = 1,\n",
    "        learning_rate = 2e-4,\n",
    "        fp16 = not is_bfloat16_supported(),  # exactly one mixed-precision mode is enabled\n",
    "        bf16 = is_bfloat16_supported(),\n",
    "        logging_steps = 1,  # log the loss every optimizer step\n",
    "        optim = \"adamw_8bit\",  # 8-bit AdamW to cut optimizer-state memory\n",
    "        weight_decay = 0.01,\n",
    "        lr_scheduler_type = \"linear\",\n",
    "        seed = 3407,\n",
    "        output_dir = \"outputs\",\n",
    "    ),\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b970fd02-b448-4d72-bf78-618dc060fd59",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Run fine-tuning; the returned object carries training statistics (loss, runtime).\n",
    "trainer_stats = trainer.train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "419132e1-dd14-4e2d-83af-2f9924414b39",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Save the trained LoRA adapter weights (PEFT model) and tokenizer files to ./lora_model;\n",
    "# the 4-bit base model itself is not duplicated here.\n",
    "model.save_pretrained(\"lora_model\")\n",
    "tokenizer.save_pretrained(\"lora_model\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fea9aee4-7c32-416d-a5de-0c18cb8bb60b",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e43a4934-ac18-48c7-aa5c-753a788e4167",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "feeb6731-e6ef-429a-b8ef-911305a9e40e",
   "metadata": {},
   "source": [
    "### test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ccc5a0f6-21de-425c-a9ef-dca8c8647a81",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Baseline check: query the *base* (un-finetuned) model with the RAG prompt,\n",
    "# so its answer can be compared with the LoRA model's answer in a later cell.\n",
    "from transformers import AutoModelForCausalLM, AutoTokenizer\n",
    "\n",
    "model_name = \"unsloth/Qwen2.5-7B-Instruct\"\n",
    "\n",
    "\n",
    "# NOTE(review): duplicated from the training cell — keep this in sync with the\n",
    "# prompt used for fine-tuning, or evaluation will drift from training.\n",
    "rag_prompt = \"\"\"DOCUMENT：  \n",
    "{}\n",
    "\n",
    "QUESTION：  \n",
    "{}\n",
    "\n",
    "INSTRUCTIONS：  \n",
    "使用上述文档内容回答用户的问题, 确保你的回答基于文档中的事实。  \n",
    "如果文档中没有足够的事实来回答该问题，请返回 “抱歉，我不知道”\"\"\"\n",
    "\n",
    "\n",
    "\n",
    "test_docs = \"\"\"作者：古都闲云\n",
    "链接：https://www.zhihu.com/question/780044404/answer/4520896596\n",
    "来源：知乎\n",
    "著作权归作者所有。商业转载请联系作者获得授权，非商业转载请注明出处。\n",
    "\n",
    "这个数据在往年，都是假期最后一天晚上七八点就会公布的，今年延后到了假期结束次日的下午3点后发布，所以没有引起太大关注。对假期国内游的数据统计，在口径上略有调整，过去是“接待国内游客人次”，现在是“全国国内出游人次”。所以，我们可以根据公布增速反推计算同比增速时使用的不同口径的基数，并且能够得到计算2023年国庆中秋八天连假时的“可比基数”。按公布数据，2024年国庆节假日7天，全国国内出游7.65亿人次，按可比口径同比增长5.9%，较2019年同期增长10.2%；国内游客出游总花费7008.17亿元，按可比口径同比增长6.3%，较2019年同期增长7.9%。通过以上数字和增速，我们可以反推得到2023年国庆可比的七天化的“全国国内出游人次”是7.224亿人次，对应可比的七天化的总花费是6592.8亿元；反推得到2019年国庆可比的“全国国内出游人次”是6.942亿人次，对应可比的总花费是6495.06亿元。我们看2023年数据，2023年国庆中秋八天连假公布的“全国国内出游人次”是8.26亿人次，按可比口径比2019年增长4.1%；“国内旅游收入”是7534.3亿元，按可比口径比2019年增长1.5%。查看2023年情况，我们上面得到的反推得到的2023年七天化可比数据与2019年可比数据在验算之后，也是吻合的，说明2024年的计算口径与2023年一致。我们再看2019年公布数据，国庆七天全国共接待国内游客7.82亿人次，实现国内旅游收入6497.1亿元。我们能发现，2019年公布的旅游收入能够对应上我们在前面的反推（公布6497.1≈推算6495.06），但公布的人次数据方面差距较大（公布7.82亿＞＞推算6.942亿）。这显示，在没有改变国内旅游收入统计口径的前提下，却改变了国内旅游人次的统计口径，这是把人次统计压低了，使得2024年公布的7.65亿人次反而小于2019年的7.82亿人次。并且，这种做法在另一方面的作用是使计算得到的人均花费上升。如果我们参考2019年的可比人次数据被缩小的同比例，可以推算得到2024年的、以2019年口径标准统计的人次数据，是8.618亿人次。那么，以2019年口径为基准：修正后的2024年的人次均花费为813.2元、修正后的2023年的人次均花费为810.16元，都没有恢复到2019年的人次均花费830.8元的水平。即使我们不采纳这种修正口径，也可以看到2024年的数据相比2023年实际上进步非常微弱。假期期间，不乏有媒体发出因股市大涨居民敢于消费的报道，但通过数据我们显然能够发现这种报道是不客观的。居民预期依然偏弱，消费意愿并没有马上扭转。联想到数据发布时间比平常晚了近20个小时，并且是选取交易时间结束后低调发布，或许可以联想这是为了避免旅游数据对于节后股市产生不利影响。\"\"\"\n",
    "\n",
    "model = AutoModelForCausalLM.from_pretrained(\n",
    "    model_name,\n",
    "    torch_dtype=\"auto\",\n",
    "    device_map=\"auto\"\n",
    ")\n",
    "tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
    "\n",
    "# NOTE(review): the question (Xiaomi SU7 sales) is unrelated to the tourism\n",
    "# document in test_docs — presumably a deliberate probe of the refusal path\n",
    "# (\"抱歉，我不知道\"); confirm this is intended.\n",
    "messages = [\n",
    "    {\"role\": \"system\", \"content\": \"You are Qwen, created by Alibaba Cloud. You are a helpful assistant.\"},\n",
    "    {\"role\": \"user\", \"content\": rag_prompt.format(test_docs, '小米su7卖的怎么样？')}\n",
    "]\n",
    "# add_generation_prompt=True appends the assistant header so the model answers.\n",
    "text = tokenizer.apply_chat_template(\n",
    "    messages,\n",
    "    tokenize=False,\n",
    "    add_generation_prompt=True\n",
    ")\n",
    "model_inputs = tokenizer([text], return_tensors=\"pt\").to(model.device)\n",
    "\n",
    "generated_ids = model.generate(\n",
    "    **model_inputs,\n",
    "    max_new_tokens=512\n",
    ")\n",
    "# Strip the echoed prompt tokens so only the new continuation is decoded.\n",
    "generated_ids = [\n",
    "    output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)\n",
    "]\n",
    "\n",
    "response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]\n",
    "print(response)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3e5c3ce6-aaa6-4d92-8671-706b8fc5bca2",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the trained LoRA adapter on top of the base model and re-run the same\n",
    "# prompt (model_inputs from the previous cell) to compare answers.\n",
    "from peft import PeftModel\n",
    "model = PeftModel.from_pretrained(model,'lora_model')\n",
    "generated_ids = model.generate(\n",
    "    **model_inputs,\n",
    "    max_new_tokens=512\n",
    ")\n",
    "# Keep only the newly generated tokens (drop the echoed prompt).\n",
    "generated_ids = [\n",
    "    output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)\n",
    "]\n",
    "\n",
    "response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]\n",
    "print(response)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.0rc2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
