{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 相关包安装"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#pip install nltk\n",
    "import nltk\n",
    "nltk.download('punkt')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 导入相关包"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from datasets import load_dataset,DatasetDict\n",
    "from transformers import AutoTokenizer, AutoModelForQuestionAnswering,Trainer,TrainingArguments"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 数据集加载"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "datasets = DatasetDict.load_from_disk('/data/datasets/cmrc2018')\n",
    "print(datasets[\"train\"][100])\n",
    "print(datasets[\"train\"][100]['context'][371:376])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 数据预处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "tokenizer = AutoTokenizer.from_pretrained(\"/data/models/huggingface/chinese-macbert-base\")\n",
    "#  is_split_into_words 决定了词和句子的处理方式\n",
    "tokenizer.decode(tokenizer(datasets[\"train\"][100]['context'])['input_ids'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "sample_dataset = datasets[\"train\"].select(range(10)) # 获取前10个数据组成一个小的数据集\n",
    "# 'id', 'context', 'question', 'answers'\n",
    "\n",
    "print(\"用于数据处理测试的数据集合的数量是\",len(sample_dataset))\n",
    "print(\"数据集中的第一个数据是\",sample_dataset[0])\n",
    "print(\"这个数据的key有\",sample_dataset[0].keys())\n",
    "print(\"这个数据中answers的回答形式是\",sample_dataset[0]['answers'].keys())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# The tokenized result can be indexed like a dict: key first, then row index,\n",
    "# e.g. tokenizer_examples[\"input_ids\"][0] is the input_ids of the 0th row.\n",
    "tokenizer_examples = tokenizer(\n",
    "    text=sample_dataset[\"question\"],\n",
    "    text_pair=sample_dataset[\"context\"],\n",
    "    return_offsets_mapping=True,\n",
    "    # enable the sliding window over long contexts\n",
    "    return_overflowing_tokens=True,\n",
    "    # number of overlapping tokens carried over between adjacent windows (default 0: no overlap)\n",
    "    stride=128,\n",
    "    max_length=384,\n",
    "    padding=\"max_length\",\n",
    "    truncation=\"only_second\"\n",
    ")\n",
    "# Tokens between the leading 101 ([CLS]) and the first 102 ([SEP]) are the question;\n",
    "# tokens between that 102 and the trailing 102 are the context.\n",
    "# dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'offset_mapping'])\n",
    "tokenizer_examples.keys()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 原始的10条数据被切成25条数据，overflow_to_sample_mapping代表了切分后的样本对应原始样本的索引\n",
    "print(tokenizer_examples[\"overflow_to_sample_mapping\"])\n",
    "for  sen in tokenizer_examples[\"input_ids\"]:\n",
    "    print(tokenizer.decode(sen))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#  offset_mapping 记录了每个token在原始文本中的位置，非常方便\n",
    "print(tokenizer_examples['offset_mapping'][:3])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "tokenizer_examples = tokenizer(\n",
    "        text=sample_dataset[\"question\"],\n",
    "        text_pair=sample_dataset[\"context\"],\n",
    "        return_offsets_mapping=True,\n",
    "        # 增加滑动窗口\n",
    "        return_overflowing_tokens=True,\n",
    "        # 滑动窗口中下一行保留上一个句子中多少个字符，默认是0，也就是不保留\n",
    "        stride=128,\n",
    "        max_length=384,\n",
    "        padding=\"max_length\",\n",
    "        truncation=\"only_second\"\n",
    "    ) \n",
    "\n",
    "sample_mapping  =  tokenizer_examples.pop(\"overflow_to_sample_mapping\")\n",
    "len(sample_mapping)  \n",
    "print(sample_mapping)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def process_func(examples):\n",
    "    \"\"\"Tokenize a batch of QA examples with a sliding window and attach\n",
    "    start/end answer token positions plus the originating example id.\"\"\"\n",
    "    tokenized = tokenizer(\n",
    "        text=examples[\"question\"],\n",
    "        text_pair=examples[\"context\"],\n",
    "        return_offsets_mapping=True,\n",
    "        # enable the sliding window over long contexts\n",
    "        return_overflowing_tokens=True,\n",
    "        # number of overlapping tokens carried over between adjacent windows (default 0)\n",
    "        stride=128,\n",
    "        max_length=384,\n",
    "        padding=\"max_length\",\n",
    "        truncation=\"only_second\"\n",
    "    )\n",
    "\n",
    "    # window index -> index of the original example it was cut from\n",
    "    sample_mapping = tokenized.pop(\"overflow_to_sample_mapping\")\n",
    "    start_positions = []\n",
    "    end_positions = []\n",
    "    examples_ids = []\n",
    "\n",
    "    for idx in range(len(sample_mapping)):\n",
    "        # the gold answer belongs to the ORIGINAL example, so look it up via sample_mapping\n",
    "        answer = examples['answers'][sample_mapping[idx]]\n",
    "        start_char = answer['answer_start'][0]\n",
    "        end_char = start_char + len(answer['text'][0])\n",
    "\n",
    "        # token range covered by the context (sequence id 1)\n",
    "        seq_ids = tokenized.sequence_ids(idx)\n",
    "        context_start = seq_ids.index(1)\n",
    "        context_end = seq_ids.index(None, context_start) - 1\n",
    "        offset = tokenized.get('offset_mapping')[idx]\n",
    "\n",
    "        if offset[context_end][1] < start_char or offset[context_start][0] > end_char:\n",
    "            # the answer lies entirely outside this window -> point both labels at [CLS]\n",
    "            start_token_pos = 0\n",
    "            end_token_pos = 0\n",
    "        else:\n",
    "            # scan inward from both edges to the tokens covering the answer chars\n",
    "            token_id = context_start\n",
    "            while token_id <= context_end and offset[token_id][0] < start_char:\n",
    "                token_id += 1\n",
    "            start_token_pos = token_id\n",
    "            token_id = context_end\n",
    "            while token_id >= context_start and offset[token_id][1] > end_char:\n",
    "                token_id -= 1\n",
    "            end_token_pos = token_id\n",
    "\n",
    "        start_positions.append(start_token_pos)\n",
    "        end_positions.append(end_token_pos)\n",
    "        examples_ids.append(examples['id'][sample_mapping[idx]])\n",
    "        # keep offsets only for context tokens (None elsewhere); used at prediction time\n",
    "        tokenized['offset_mapping'][idx] = [\n",
    "            (o if seq_ids[k] == 1 else None)\n",
    "            for k, o in enumerate(tokenized['offset_mapping'][idx])\n",
    "        ]\n",
    "\n",
    "    tokenized[\"examples_ids\"] = examples_ids\n",
    "    tokenized[\"start_positions\"] = start_positions\n",
    "    tokenized[\"end_positions\"] = end_positions\n",
    "    return tokenized"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#  数据映射"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "tokenized_datasets = datasets.map(process_func, batched=True, remove_columns=datasets['train'].column_names)\n",
    "tokenized_datasets[\"train\"][0]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 查看完成后的数据信息"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "print(tokenized_datasets[\"train\"][6][\"offset_mapping\"])\n",
    "print(tokenized_datasets[\"train\"][6][\"start_positions\"])\n",
    "print(tokenized_datasets[\"train\"][6][\"end_positions\"])\n",
    "print(tokenized_datasets[\"train\"][6][\"input_ids\"])\n",
    "print(tokenized_datasets[\"train\"][6][\"token_type_ids\"])\n",
    "print(tokenized_datasets[\"train\"][6][\"attention_mask\"])\n",
    "print(tokenized_datasets[\"train\"][6][\"examples_ids\"])\n",
    "print(tokenized_datasets[\"train\"][6][\"input_ids\"][100:125])\n",
    "tokenizer.decode(tokenized_datasets[\"train\"][6][\"input_ids\"][100:125])\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 获取模型的输出"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import collections\n",
    "\n",
    "# Demo: map each original example id to the indices of its tokenized features\n",
    "example_to_feature = collections.defaultdict(list)\n",
    "for idx, example_id in enumerate(tokenized_datasets[\"train\"][\"examples_ids\"][:10]):\n",
    "    example_to_feature[example_id].append(idx)\n",
    "print(example_to_feature)\n",
    "\n",
    "import numpy as np\n",
    "\n",
    "def get_result(start_logits, end_logits, examples, features):\n",
    "    \"\"\"Decode span logits back to answer text.\n",
    "\n",
    "    Returns (predictions, references): both dicts keyed by example id --\n",
    "    predictions map to the best decoded answer string, references to the\n",
    "    list of gold answer texts (the format evaluate_cmrc expects).\n",
    "    \"\"\"\n",
    "    # BUG FIX: predictions/references were initialized as lists but assigned\n",
    "    # with the literal key \"example_id\" -> TypeError / all answers on one key.\n",
    "    predictions = {}\n",
    "    references = {}\n",
    "    example_to_feature = collections.defaultdict(list)\n",
    "    for idx, example_id in enumerate(features[\"examples_ids\"]):\n",
    "        example_to_feature[example_id].append(idx)\n",
    "\n",
    "    # candidate-selection hyper-parameters\n",
    "    n_best = 20\n",
    "    max_answer_length = 30\n",
    "\n",
    "    for example in examples:\n",
    "        example_id = example[\"id\"]\n",
    "        context = example[\"context\"]\n",
    "        answers = []\n",
    "        for feature_idx in example_to_feature[example_id]:\n",
    "            start_logit = start_logits[feature_idx]\n",
    "            end_logit = end_logits[feature_idx]\n",
    "            offset = features[feature_idx][\"offset_mapping\"]\n",
    "            start_indexes = np.argsort(start_logit)[::-1][:n_best].tolist()\n",
    "            end_indexes = np.argsort(end_logit)[::-1][:n_best].tolist()\n",
    "            for start_index in start_indexes:\n",
    "                for end_index in end_indexes:\n",
    "                    # offsets mapped to None belong to question/special tokens\n",
    "                    if offset[start_index] is None or offset[end_index] is None:\n",
    "                        continue\n",
    "                    if end_index < start_index or end_index - start_index + 1 > max_answer_length:\n",
    "                        continue\n",
    "                    answers.append(\n",
    "                        {\"text\": context[offset[start_index][0]:offset[end_index][1]],\n",
    "                         \"score\": start_logit[start_index] + end_logit[end_index]}\n",
    "                    )\n",
    "        if len(answers) > 0:\n",
    "            best_answer = max(answers, key=lambda x: x[\"score\"])\n",
    "            predictions[example_id] = best_answer[\"text\"]\n",
    "        else:\n",
    "            predictions[example_id] = \"\"\n",
    "        references[example_id] = example[\"answers\"][\"text\"]\n",
    "    return predictions, references"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 评估函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from cmrc_eval import evaluate_cmrc\n",
    "\n",
    "def compute_metrics(preds):\n",
    "    \"\"\"Compute CMRC EM/F1 from the trainer's raw (start, end) logits,\n",
    "    picking validation vs. test by the number of feature rows.\"\"\"\n",
    "    start_logits, end_logits = preds[0]\n",
    "    # BUG FIX: 'tokenied_datasets' was an undefined name (typo for\n",
    "    # tokenized_datasets), and the validation branch decoded against the\n",
    "    # TEST features, whose ids/offsets do not match the validation examples.\n",
    "    if start_logits.shape[0] == len(tokenized_datasets[\"validation\"]):\n",
    "        p, r = get_result(start_logits, end_logits, datasets[\"validation\"], tokenized_datasets[\"validation\"])\n",
    "    else:\n",
    "        p, r = get_result(start_logits, end_logits, datasets[\"test\"], tokenized_datasets[\"test\"])\n",
    "    return evaluate_cmrc(p, r)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 加载模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "model  = AutoModelForQuestionAnswering.from_pretrained(\"/data/models/huggingface/chinese-macbert-base\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 配置 TrainingArguments"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "training_args = TrainingArguments(\n",
    "    output_dir=\"/data/model_for_qa/output\",\n",
    "    per_device_train_batch_size=8,\n",
    "    per_device_eval_batch_size=4,\n",
    "    gradient_accumulation_steps=2,\n",
    "    eval_strategy='epoch',\n",
    "    learning_rate=5e-5,\n",
    "    weight_decay=0.01,\n",
    "    warmup_ratio=0.1,\n",
    "    # metric_for_best_model='f1',\n",
    "    # load_best_model_at_end=True,\n",
    "    logging_steps=100,\n",
    "    num_train_epochs=5\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 创建 Trainer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import DefaultDataCollator\n",
    "\n",
    "# BUG FIX: 'tokenied_datasets' was an undefined name (typo for the\n",
    "# tokenized_datasets produced by datasets.map above) -> NameError.\n",
    "trainer = Trainer(\n",
    "    model=model,\n",
    "    args=training_args,\n",
    "    train_dataset=tokenized_datasets['train'],\n",
    "    eval_dataset=tokenized_datasets['validation'],\n",
    "    data_collator=DefaultDataCollator(),\n",
    "    compute_metrics=compute_metrics\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "trainer.train()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 模型预测"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import pipeline\n",
    "pipe = pipeline(\"question-answering\",model=model,tokenizer=tokenizer,device=0)\n",
    "print(pipe(question=\"小明在哪里上的飞机？\",context=\"小明去中国，由于定了到北京的机票，所以先坐车去了大阪，然后从大阪上的飞机，坐了飞机去了北京。\"))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.19"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
