{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import datasets\n",
    "import evaluate\n",
    "import torch\n",
    "import json\n",
    "import importlib\n",
    "from datasets import load_dataset\n",
    "import collections\n",
    "from datasets import load_dataset, DatasetDict\n",
    "from transformers import DefaultDataCollator\n",
    "from transformers import AutoTokenizer, AutoModelForMultipleChoice, Trainer, TrainingArguments\n",
    "from peft import LoraConfig, TaskType, get_peft_model\n",
    "# from transformers import AutoTokenizer,AutoModelForCausalLM\n",
    "\n",
    "# Load base model and tokenizer for Chinese multiple-choice QA.\n",
    "MODEL_PATH = \"/data/models/huggingface/chinese-macbert-large\"\n",
    "tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)\n",
    "model = AutoModelForMultipleChoice.from_pretrained(MODEL_PATH)\n",
    "# Required so gradients can flow back to the inputs when base weights are frozen by PEFT.\n",
    "model.enable_input_require_grads()\n",
    "\n",
    "# Collect the names of all Linear / Conv1d / Conv2d submodules as LoRA candidates.\n",
    "# (PEFT supports torch.nn.Linear, Embedding, Conv2d and transformers' Conv1D.)\n",
    "target_modules = []\n",
    "for name, module in model.named_modules():\n",
    "    if name:\n",
    "        cls_name = str(module.__class__)\n",
    "        # BUG FIX: the original checked \"Conv2d\" twice; \"Conv1d\" was intended.\n",
    "        if \"Linear\" in cls_name or \"Conv1d\" in cls_name or \"Conv2d\" in cls_name:\n",
    "            target_modules.append(name)\n",
    "target_modules = list(set(target_modules))\n",
    "\n",
    "# All encoder-layer candidates, kept for inspection; LoRA is currently pinned\n",
    "# to a single hand-picked module for experimentation.\n",
    "encoder_linear_layers = [layer for layer in target_modules if layer.startswith(\"bert.encoder.layer.\")]\n",
    "freeze_layers = [\"bert.encoder.layer.1.attention.output.dense\"]\n",
    "config = LoraConfig(\n",
    "        # BUG FIX: this is a multiple-choice classification head, not a causal LM,\n",
    "        # so SEQ_CLS is the appropriate PEFT task type (it also keeps the\n",
    "        # classifier head trainable via modules_to_save).\n",
    "        task_type=TaskType.SEQ_CLS,\n",
    "        # BUG FIX: the original passed the misspelled `freaze_layers` variable,\n",
    "        # silently ignoring the hand-picked `freeze_layers` list defined above.\n",
    "        target_modules=freeze_layers,\n",
    "        inference_mode=False,\n",
    "        r=8,\n",
    "        lora_alpha=32,\n",
    "        lora_dropout=0.1\n",
    ")\n",
    "model = get_peft_model(model, config)\n",
    "\n",
    "# Load the dataset via the local loading script and carve out a validation split.\n",
    "dataset = load_dataset(\"./data_handler.py\", split=\"train\")\n",
    "# BUG FIX: seed the split so the train/test partition is reproducible across runs\n",
    "# (the original was unseeded, so every rerun trained on a different split).\n",
    "sdataset = dataset.train_test_split(test_size=0.1, seed=42)\n",
    "train_dataset = sdataset[\"train\"]\n",
    "val_dataset = sdataset[\"test\"]\n",
    "\n",
    "\n",
    "\n",
    "def process_function(examples):\n",
    "    \"\"\"Batched preprocessing: expand each example into exactly 5\n",
    "    (context, question+choice) pairs for the multiple-choice head.\n",
    "\n",
    "    Each candidate choice is paired with the example's context. Examples with\n",
    "    fewer than 5 choices are padded up to 5 with a dummy \"不知道\" (\"don't know\")\n",
    "    choice. The label is the index of the answer within the ORIGINAL choice list.\n",
    "\n",
    "    NOTE(review): assumes `answer` always appears in `choice`; otherwise\n",
    "    `.index()` raises ValueError — confirm against data_handler.py.\n",
    "    \"\"\"\n",
    "    context = []\n",
    "    question_choice = []\n",
    "    labels = []\n",
    "    for idx in range(len(examples['id'])):\n",
    "        # The context field is a single-element list here; unwrap it.\n",
    "        ctx =  examples['context'][idx][0]\n",
    "        question = examples['question'][idx]\n",
    "        choices = examples['choice'][idx]\n",
    "        for choice in choices:\n",
    "            context.append(ctx)\n",
    "            question_choice.append(question+\" 选项列表:\"+choice)\n",
    "        # Fewer than 5 choices: pad to 5 so every example yields exactly 5 sequences\n",
    "        # (required by the fixed-size regrouping below).\n",
    "        if len(choices) <5:\n",
    "            for _ in range(5-len(choices)):\n",
    "                context.append(ctx)\n",
    "                question_choice.append(question+\" 选项列表:\"+\"不知道\")\n",
    "        labels.append(choices.index(examples[\"answer\"][idx]))\n",
    "    # Truncate only the context (first sequence) so question+choice stays intact.\n",
    "    tokenized_datasets =  tokenizer(context,question_choice,truncation=\"only_first\",max_length=512,padding=\"max_length\")\n",
    "    # Regroup the flat encodings into chunks of 5 — one chunk per original example.\n",
    "    tokenized_datasets = {k:[v[i:i+5] for i in range(0,len(v),5)] for k,v in tokenized_datasets.items()}\n",
    "    tokenized_datasets[\"labels\"] = labels\n",
    "    return tokenized_datasets\n",
    "\n",
    "# Apply preprocessing to both splits; drop raw columns so only model inputs remain.\n",
    "tokenized_datasets = sdataset.map(process_function, batched=True, remove_columns=sdataset['train'].column_names)\n",
    "# Local copy of the `accuracy` metric script (offline environment).\n",
    "accuracy = evaluate.load(\"/code/evaluate-main/metrics/accuracy\")\n",
    "\n",
    "# predictions= [0.2,0,3,0.4,0.1]\n",
    "# labels = [2]\n",
    "# print(compute_metric([predictions,labels]))\n",
    "# res = accuracy.compute(references=[0,1,0,1], predictions=[1,0,0,1])\n",
    "\n",
    "\n",
    "def compute_metric(pred):\n",
    "    \"\"\"Trainer metric hook: accuracy of argmax-over-choices vs. gold labels.\"\"\"\n",
    "    logits, labels = pred\n",
    "    predicted_ids = np.argmax(logits, axis=-1)\n",
    "    return accuracy.compute(predictions=predicted_ids, references=labels)\n",
    "\n",
    "training_args = TrainingArguments(\n",
    "    output_dir=\"/data/model_for_choise/output\",\n",
    "    # Effective train batch size per device = 1 * 8 (gradient accumulation).\n",
    "    per_device_train_batch_size=1,\n",
    "    per_device_eval_batch_size=1,\n",
    "    gradient_accumulation_steps=8,\n",
    "    # Evaluate every `eval_steps` optimizer steps. NOTE(review): the original\n",
    "    # comment claimed evaluation every 20 steps, but eval_steps is 5.\n",
    "    eval_strategy=\"steps\",\n",
    "    eval_steps=5,\n",
    "    learning_rate=1e-5,\n",
    "    weight_decay=0.01,\n",
    "    warmup_ratio=0.1,\n",
    "    # metric_for_best_model='accuracy',\n",
    "    # load_best_model_at_end=True,\n",
    "    # Checkpoint every 5 steps. NOTE(review): no save_total_limit is set, so\n",
    "    # checkpoints will accumulate on disk — consider adding one.\n",
    "    save_strategy='steps',\n",
    "    save_steps=5,\n",
    "    # Log every 100 steps.\n",
    "    logging_steps=100,\n",
    "    num_train_epochs=10\n",
    ")\n",
    "\n",
    "trainer = Trainer(\n",
    "    model=model,\n",
    "    args=training_args,\n",
    "    train_dataset=tokenized_datasets['train'],\n",
    "    eval_dataset=tokenized_datasets['test'],\n",
    "    compute_metrics=compute_metric\n",
    ")\n",
    "\n",
    "trainer.train()\n",
    "\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.19"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
