{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import datasets\n",
    "import evaluate\n",
    "import json\n",
    "from datasets import load_dataset\n",
    "from cmrc_eval import evaluate_cmrc\n",
    "import collections\n",
    "from datasets import load_dataset, DatasetDict\n",
    "from transformers import DefaultDataCollator\n",
    "from transformers import AutoTokenizer, AutoModelForQuestionAnswering, Trainer, TrainingArguments"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # c3 dataset (CLUE C3) -- alternative data source, kept for reference, currently disabled\n",
    "# dataset_dir = \"/data/datasets/clue_c3\"\n",
    "# datasets =  load_dataset(path=dataset_dir)\n",
    "# datasets[\"train\"][0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the custom dataset via its loading script and hold out 10% for validation.\n",
    "dataset = load_dataset(\"dataset_creator.py\",split=\"train\")\n",
    "# A fixed seed makes the train/validation split reproducible across kernel restarts.\n",
    "sdataset = dataset.train_test_split(test_size=0.1, seed=42)\n",
    "train_dataset = sdataset[\"train\"]\n",
    "val_dataset = sdataset[\"test\"]\n",
    "# last expression: display the DatasetDict summary (split sizes)\n",
    "sdataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): these imports duplicate the top cell, except AutoModelForMultipleChoice --\n",
    "# the top cell imports AutoModelForQuestionAnswering, but this task needs the multiple-choice head.\n",
    "import evaluate\n",
    "from transformers import AutoTokenizer,AutoModelForMultipleChoice,TrainingArguments,Trainer\n",
    "# show one training example to sanity-check the schema (context / question / choice / answer)\n",
    "sdataset[\"train\"][0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the tokenizer for the same local chinese-macbert-large checkpoint used for the model below.\n",
    "tokenizer = AutoTokenizer.from_pretrained(\"/data/models/huggingface/chinese-macbert-large\")\n",
    "tokenizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def process_function(examples):\n",
    "    \"\"\"Tokenize a batch of multiple-choice examples for AutoModelForMultipleChoice.\n",
    "\n",
    "    Each example is expanded to exactly 5 (context, question + choice) pairs --\n",
    "    padded with a dummy choice when it has fewer than 5 options -- tokenized as\n",
    "    one flat list, then regrouped so every feature has shape (batch, 5, seq_len).\n",
    "    `labels` is the index of the correct choice within the original option list.\n",
    "\n",
    "    NOTE(review): assumes every example has at most 5 choices and that\n",
    "    examples['answer'] always occurs in examples['choice'] (choices.index\n",
    "    raises ValueError otherwise) -- confirm against dataset_creator.py.\n",
    "    \"\"\"\n",
    "    # print(examples['id'][0],examples['context'][0],examples['question'][0],examples['choice'][0],examples['answer'][0])\n",
    "    context = []\n",
    "    question_choice = []\n",
    "    labels = []\n",
    "    for idx in range(len(examples['id'])):\n",
    "        # context appears to be a list; only its first element is used -- TODO confirm\n",
    "        ctx =  examples['context'][idx][0]\n",
    "        question = examples['question'][idx]\n",
    "        choices = examples['choice'][idx]\n",
    "        for choice in choices:\n",
    "            context.append(ctx)\n",
    "            question_choice.append(question+\" 选项列表:\"+choice)\n",
    "        # Fewer than 5 options: pad up to 5 with a dummy choice so every\n",
    "        # example contributes exactly 5 candidate sequences.\n",
    "        if len(choices) <5:\n",
    "            for _ in range(5-len(choices)):\n",
    "                context.append(ctx)\n",
    "                question_choice.append(question+\" 选项列表:\"+\"不知道\")\n",
    "        labels.append(choices.index(examples[\"answer\"][idx]))\n",
    "    # truncation=\"only_first\" trims the context only, keeping the question+choice text intact\n",
    "    tokenized_datasets =  tokenizer(context,question_choice,truncation=\"only_first\",max_length=512,padding=\"max_length\")\n",
    "    # for i in  tokenized_datasets['input_ids'][0:5]:\n",
    "        # print(i)\n",
    "    # Regroup the flat (batch*5) encodings back into chunks of 5 per example.\n",
    "    tokenized_datasets = {k:[v[i:i+5] for i in range(0,len(v),5)] for k,v in tokenized_datasets.items()}\n",
    "    # print(len(tokenized_datasets['input_ids']))\n",
    "    tokenized_datasets[\"labels\"] = labels\n",
    "    return tokenized_datasets\n",
    "tokenized_datasets = sdataset.map(process_function, batched=True, remove_columns=sdataset['train'].column_names)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "tokenized_datasets[\"train\"][5]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sanity-check the tensor layout: expected shape is (num_examples, 5, 512).\n",
    "# NOTE(review): numpy is already imported in the first cell; this re-import is redundant.\n",
    "import numpy as np\n",
    "np.array(tokenized_datasets[\"train\"][\"input_ids\"]).shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "model =  AutoModelForMultipleChoice.from_pretrained(\"/data/models/huggingface/chinese-macbert-large\")\n",
    "# Parameter-efficient tuning: freeze every parameter whose name does not\n",
    "# contain 'bias', so only the bias terms remain trainable.\n",
    "num_param = 0 \n",
    "not_train_params = 0\n",
    "for name,param in model.named_parameters():\n",
    "    if 'bias' not in name: \n",
    "        param.requires_grad = False\n",
    "        not_train_params+= param.numel()\n",
    "    else:\n",
    "        num_param += param.numel()\n",
    "# trainable (bias) parameter count vs. frozen parameter count\n",
    "print(num_param,not_train_params)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import evaluate\n",
    "import numpy as np\n",
    "# accuracy = evaluate.load(\"accuracy\") # loading by hub name did not work here; use a local copy of the metric instead\n",
    "accuracy = evaluate.load(\"/code/evaluate-main/metrics/accuracy\")\n",
    "# res = accuracy.compute(references=[0,1,0,1], predictions=[1,0,0,1])\n",
    "# print(evaluate.load('accuracy').compute(references=[1], predictions=[1]))\n",
    "def compute_metric(pred):\n",
    "    \"\"\"Compute accuracy from an EvalPrediction-like (logits, labels) pair.\"\"\"\n",
    "    predictions ,labels = pred\n",
    "    # logits (batch, 5) -> predicted choice index\n",
    "    predictions = np.argmax(predictions,axis = -1)\n",
    "    return accuracy.compute(predictions=predictions,references=labels)\n",
    "tokenized_datasets"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "training_args = TrainingArguments(\n",
    "    # NOTE(review): path spells 'choise' (sic) -- existing outputs depend on it, left unchanged\n",
    "    output_dir=\"/data/model_for_choise/output\",\n",
    "    # effective train batch size = 1 * 8 via gradient accumulation\n",
    "    per_device_train_batch_size=1,\n",
    "    per_device_eval_batch_size=1,\n",
    "    gradient_accumulation_steps=8,\n",
    "    # eval_strategy='epoch',\n",
    "    # evaluate every eval_steps (=100) optimizer steps\n",
    "    eval_strategy=\"steps\",\n",
    "    eval_steps=100,\n",
    "    learning_rate=5e-5,\n",
    "    weight_decay=0.01,\n",
    "    warmup_ratio=0.1,\n",
    "    # load_best_model_at_end requires save_steps to be a multiple of eval_steps\n",
    "    metric_for_best_model='accuracy',\n",
    "    load_best_model_at_end=True,\n",
    "    save_strategy='steps',\n",
    "    save_steps=100,\n",
    "    # log every 100 steps\n",
    "    logging_steps=100,\n",
    "    num_train_epochs=10\n",
    ")\n",
    "\n",
    "trainer = Trainer(\n",
    "    model=model,\n",
    "    args=training_args,\n",
    "    train_dataset=tokenized_datasets['train'],\n",
    "    eval_dataset=tokenized_datasets['test'],\n",
    "    compute_metrics=compute_metric,\n",
    "    data_collator=DefaultDataCollator()\n",
    ")\n",
    "\n",
    "trainer.train()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.19"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
