{
 "cells": [
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "collapsed": true
   },
   "source": [
    "import warnings\n",
    "\n",
    "from datasets import Dataset\n",
    "from transformers import AutoTokenizer, AutoModelForCausalLM, TrainingArguments, Trainer, DataCollatorForSeq2Seq\n",
    "\n",
    "warnings.filterwarnings(\"ignore\")"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "加载模型和分词器",
   "id": "c2bce334d777cdba"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "source": [
     "# Load the Chinese BLOOM-1.4B checkpoint and its tokenizer, then move the model to GPU.\n",
     "model_name = \"Langboat/bloom-1b4-zh\"\n",
     "tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
     "# low_cpu_mem_usage avoids materializing an extra full copy of the weights on CPU while loading.\n",
     "base_model = AutoModelForCausalLM.from_pretrained(model_name, low_cpu_mem_usage=True)\n",
     "base_model.cuda()"
   ],
   "id": "208a37a16e354e02",
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "数据准备\n",
    "1. 注意单次操作和批量操作的区别"
   ],
   "id": "85eba1615271ffef"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "source": [
    "ds = Dataset.load_from_disk(\"data/alpaca_data_zh\")\n",
    "# ds = ds.select(range(4))\n",
    "def process_function(example):\n",
    "    MAX_LEN = 256\n",
    "    instruction = tokenizer(\"\\n\".join([\"Human:\" + example[\"instruction\"], example[\"input\"]]).strip() + \"\\n\\nAssistant:\")\n",
    "    response = tokenizer(example[\"output\"] + tokenizer.eos_token)\n",
    "    input_ids = instruction[\"input_ids\"] + response[\"input_ids\"]\n",
    "    attention_mask = instruction[\"attention_mask\"] + response[\"attention_mask\"]\n",
    "    labels = [-100] * len(instruction[\"input_ids\"]) + response[\"input_ids\"]\n",
    "    if len(input_ids) > MAX_LEN:\n",
    "        input_ids = input_ids[:MAX_LEN]\n",
    "        attention_mask = attention_mask[:MAX_LEN]\n",
    "        labels = labels[:MAX_LEN]\n",
    "\n",
    "    return {\n",
    "        \"input_ids\": input_ids,\n",
    "        \"attention_mask\": attention_mask,\n",
    "        \"labels\": labels\n",
    "    }\n",
    "\n",
    "remove_columns = ds.column_names\n",
    "ds = ds.map(process_function)\n",
    "ds"
   ],
   "id": "8e581ede1b8cef3d",
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "code",
   "source": [
     "# Reference implementation: batched variant of process_function above,\n",
     "# kept commented out to contrast per-example mapping with batched mapping\n",
     "# (batched=True hands the mapper dicts of lists instead of single examples).\n",
     "# ds = Dataset.load_from_disk(\"data/alpaca_data_zh\")\n",
     "# def batch_process_function(examples, tokenizer=tokenizer):\n",
     "#     MAX_LEN = 256\n",
     "#     batch_size = len(examples[\"instruction\"])\n",
     "#     results = {\n",
     "#         \"input_ids\": [],\n",
     "#         \"attention_mask\": [],\n",
     "#         \"labels\": []\n",
     "#     }\n",
     "#\n",
     "#     for instruction, input, output in zip(examples[\"instruction\"], examples[\"input\"], examples[\"output\"]):\n",
     "#         instruction = tokenizer(\"\\n\".join([\"Human:\" + instruction, input]).strip() + \"\\n\\nAssistant:\")\n",
     "#         response = tokenizer(output + tokenizer.eos_token)\n",
     "#\n",
     "#         input_ids = instruction[\"input_ids\"] + response[\"input_ids\"]\n",
     "#         attention_mask = instruction[\"attention_mask\"] + response[\"attention_mask\"]\n",
     "#         labels = [-100] * len(instruction[\"input_ids\"]) + response[\"input_ids\"]\n",
     "#\n",
     "#         if len(input_ids) > MAX_LEN:\n",
     "#             input_ids = input_ids[:MAX_LEN]\n",
     "#             attention_mask = attention_mask[:MAX_LEN]\n",
     "#             labels = labels[:MAX_LEN]\n",
     "#\n",
     "#         results[\"input_ids\"].append(input_ids)\n",
     "#         results[\"attention_mask\"].append(attention_mask)\n",
     "#         results[\"labels\"].append(labels)\n",
     "#\n",
     "#     return results\n",
     "#\n",
     "#\n",
     "# remove_columns = ds.column_names\n",
     "# ds = ds.map(batch_process_function, batched=True, num_proc=4)\n",
     "# ds"
   ],
   "id": "de7c97bad8f1cd71",
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "code",
   "source": [
    "for idx, (name, params) in enumerate(base_model.named_parameters()):\n",
    "    print(idx, name)\n",
    "    if idx > 20:\n",
    "        break"
   ],
   "id": "f7c1c29222f627d5",
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "参数配置",
   "id": "db2bd7b2471fe3f8"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "source": [
    "from peft import LoraConfig, TaskType, get_peft_model\n",
    "\n",
    "config = LoraConfig(\n",
    "    task_type=TaskType.CAUSAL_LM,\n",
    "    target_modules=\".*\\.1.*query_key_value\",\n",
    "    modules_to_save=[\"word_embeddings\"]\n",
    ")\n",
    "\n",
    "model = get_peft_model(base_model, config)\n",
    "model.print_trainable_parameters()\n",
    "\n",
    "args = TrainingArguments(\n",
    "    output_dir=\"output/0305\",\n",
    "    per_device_train_batch_size=2,\n",
    "    per_device_eval_batch_size=16,\n",
    "    gradient_accumulation_steps=8,\n",
    "    logging_steps=20,\n",
    "    max_steps=30\n",
    ")\n",
    "\n",
    "trainer = Trainer(\n",
    "    model=model,\n",
    "    args=args,\n",
    "    train_dataset=ds,\n",
    "    data_collator=DataCollatorForSeq2Seq(tokenizer=tokenizer),\n",
    ")\n",
    "\n",
    "trainer.train()"
   ],
   "id": "1aeae84e30c769ce",
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "code",
   "source": [
    "from peft import PeftModel\n",
    "\n",
    "load_model = PeftModel.from_pretrained(base_model, model_id=\"output/0305/checkpoint-30\" )\n",
    "load_model"
   ],
   "id": "dec1541958bf877c",
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "code",
   "source": [
     "# Fold the LoRA deltas into the base weights, yielding a plain transformers\n",
     "# model with no peft wrapper (no adapter overhead at inference time).\n",
     "merged_model = load_model.merge_and_unload()\n",
     "merged_model"
   ],
   "id": "11feac88db6ddb0f",
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "code",
   "source": "merged_model.save_pretrained(\"output/0305/model\")",
   "id": "fa731428be4c954a",
   "outputs": [],
   "execution_count": null
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
