{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 基于截断策略的机器阅读理解任务实现"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 导入相关包"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# All third-party imports in one place (removed duplicate load_dataset import).\n",
    "from datasets import load_dataset, DatasetDict\n",
    "from transformers import AutoTokenizer, AutoModelForQuestionAnswering, Trainer, TrainingArguments"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 数据集加载"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load CMRC2018 previously saved with DatasetDict.save_to_disk.\n",
    "# (load_dataset would be used instead for a hub/script dataset.)\n",
    "datasets = DatasetDict.load_from_disk('/data/datasets/cmrc2018')\n",
    "print(datasets[\"train\"][100])\n",
    "# Peek at the answer span inside the context string (answer_start of this sample is 371).\n",
    "print(datasets[\"train\"][100]['context'][371:376])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 数据预处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the tokenizer that matches the model checkpoint used below.\n",
    "tokenizer = AutoTokenizer.from_pretrained(\"/data/models/huggingface/chinese-macbert-base\")\n",
    "# is_split_into_words controls whether inputs are treated as pre-tokenized words\n",
    "tokenizer\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "print(type(datasets)) # <class 'datasets.dataset_dict.DatasetDict'>\n",
    "sample_dataset = datasets[\"train\"].select(range(20)) # take the first 20 examples as a small probe set (old comment said 10)\n",
    "# Each row has keys: 'id', 'context', 'question', 'answers'\n",
    "# sample_dataset also behaves like a dict of columns (lists)\n",
    "print(\"用于数据处理测试的数据集合的数量是\",len(sample_dataset),type(sample_dataset))\n",
    "print(\"数据集中的第一个数据是\",sample_dataset[0])\n",
    "print(\"这个数据的key有\",sample_dataset[0].keys())\n",
    "print(\"这个数据中answers的回答形式是\",sample_dataset['answers'][0].keys()) \n",
    "# <class 'datasets.arrow_dataset.Dataset'> supports dict-style column access\n",
    "print(sample_dataset[\"id\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# The returned BatchEncoding indexes like a dict of columns, e.g.\n",
    "# tokenizer_examples[\"input_ids\"][0] is the input_ids of example 0.\n",
    "tokenizer_examples = tokenizer(\n",
    "    text=sample_dataset[\"question\"],\n",
    "    text_pair=sample_dataset[\"context\"],\n",
    "    return_offsets_mapping=True,\n",
    "    max_length=512,\n",
    "    padding=\"max_length\",\n",
    "    truncation=\"only_second\"\n",
    ")\n",
    "# Layout: [CLS](101) question [SEP](102) context [SEP](102) — question sits between 101 and the first 102.\n",
    "# dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'offset_mapping'])\n",
    "print(\"tokenizer_examples 类型\",type(tokenizer_examples),tokenizer_examples.keys())\n",
    "# BatchEncoding (transformers.tokenization_utils_base.BatchEncoding) behaves like a dict\n",
    "offset_mappings = tokenizer_examples.pop('offset_mapping')\n",
    "len(offset_mappings)\n",
    "# datasets[\"train\"]\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 解释一下各个字段的含义"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Encoding layout: starts with 101 ([CLS]), a 102 ([SEP]) separates the question\n",
    "# from the context, and a final 102 closes the sequence — 3 special positions total.\n",
    "print(tokenizer_examples.keys())\n",
    "example = tokenizer_examples['input_ids'][1]\n",
    "print(\"单个数据的长度是512\",len(example))\n",
    "idx_101=[idx for idx in range(len(example)) if 101 == example[idx]]\n",
    "print(\"101 所在位置\",idx_101)\n",
    "idx_102=[idx for idx in range(len(example)) if 102 == example[idx]]\n",
    "print(\"102所在位置\",idx_102)\n",
    "# Tokens between 101 and the first 102 encode the question; tokens between the\n",
    "# first and second 102 encode the context.\n",
    "question =  sample_dataset['question'][1]   \n",
    "print(\"内容\",len(sample_dataset[\"context\"][1]),sample_dataset[\"context\"][1])\n",
    "print(\"问题\",question)\n",
    "print(\"offset 内容\",len(offset_mappings),offset_mappings[1])\n",
    "print('转换后内容',example)\n",
    "    "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 定位答案在token中的位置（拿到context的起始和结束位置，然后让现有的开头和结尾字符进行逼近）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Locate each answer's token span: find the context token range (via sequence_ids,\n",
    "# equivalently from the second (0,0) in the offset mapping), then move both ends\n",
    "# inward until they line up with the answer's character span.\n",
    "for idx,offset in enumerate(offset_mappings):\n",
    "    # offsets and samples are index-aligned, so idx picks the matching answer\n",
    "    answer = sample_dataset[idx]['answers']\n",
    "    # character span of the answer inside the context; answers has keys ('text', 'answer_start')\n",
    "    start_char = answer['answer_start'][0]\n",
    "    end_char = start_char + len(answer['text'][0])\n",
    "    print(\"token 索引和答案字符位置\",idx,start_char,end_char)\n",
    "    sequence_ids = tokenizer_examples.sequence_ids(idx)\n",
    "    # first token belonging to the context (sequence id == 1)\n",
    "    token_start = sequence_ids.index(1)\n",
    "    # last context token = position just before the first None after the context starts ([SEP]/padding)\n",
    "    token_end = sequence_ids.index(None,token_start) - 1\n",
    "    print(\"offset_mappings 长度 {} 和 sequence_ids 长度 {} 一一对应\".format(len(offset),len(sequence_ids)))\n",
    "    print(\"sequence_ids 对应信息 ,注意这里指的是 token的 id \",token_start,token_end)\n",
    "    print(offset[token_start],offset[token_end])\n",
    "    # Label (0, 0) when the answer is not fully inside the (possibly truncated)\n",
    "    # context window. BUGFIX: the old check used `and` with swapped bounds\n",
    "    # (start > end_char and end < start_char), which can never be true.\n",
    "    if offset[token_start][0] > start_char or offset[token_end][1] < end_char:\n",
    "        start_token_pos = 0\n",
    "        end_token_pos = 0\n",
    "    else:\n",
    "        # walk right to the first token whose span reaches start_char\n",
    "        token_id = token_start\n",
    "        while token_id <= token_end and offset[token_id][0] < start_char:\n",
    "            token_id += 1\n",
    "        start_token_pos = token_id\n",
    "        # walk left to the last token whose span stays within end_char.\n",
    "        # BUGFIX: end_token_pos is assigned AFTER the loop — previously it was only\n",
    "        # assigned inside the loop body, causing a NameError when the loop never ran.\n",
    "        token_id = token_end\n",
    "        while token_id >= token_start and offset[token_id][1] > end_char:\n",
    "            token_id -= 1\n",
    "        end_token_pos = token_id\n",
    "    print(\"答案 \",tokenizer.decode(tokenizer_examples[\"input_ids\"][idx][start_token_pos:end_token_pos+1]))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "    # datasets[\"train\"][0] datasets 是一个由train val test 三个key组成的字典的数据集，每个数据集中都有若干条数据，组成一个数据列表。\n",
    "    # 以 train数据集为例子 datasets[\"train\"] 列表中的每一个元素都是一个字典，字典的key是id,context,question,answers，代表其中一条数据集中的数据。\n",
    "    # process_func 在处理数据的时候，是将 datasets[\"train\"] 数据列表进行一个zip操作。\n",
    "    # examples 是一个字典，字典的key是 id, context, question, answers ,每一个key 都对应一个列表"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# A real CMRC2018 record looks like this. NOTE: this bare dict literal is an\n",
    "# expression statement that is evaluated and discarded (it is not the last\n",
    "# expression of the cell) — it is kept purely as a visual reference.\n",
    "{'id': 'TRAIN_186_QUERY_0',\n",
    " 'context': '范廷颂枢机（，），圣名保禄·若瑟（），是越南罗马天主教枢机。1963年被任为主教；1990年被擢升为天主教河内总教区宗座署理；1994年被擢升为总主教，同年年底被擢升为枢机；2009年2月离世。范廷颂于1919年6月15日在越南宁平省天主教发艳教区出生；童年时接受良好教育后，被一位越南神父带到河内继续其学业。范廷颂于1940年在河内大修道院完成神学学业。范廷颂于1949年6月6日在河内的主教座堂晋铎；及后被派到圣女小德兰孤儿院服务。1950年代，范廷颂在河内堂区创建移民接待中心以收容到河内避战的难民。1954年，法越战争结束，越南民主共和国建都河内，当时很多天主教神职人员逃至越南的南方，但范廷颂仍然留在河内。翌年管理圣若望小修院；惟在1960年因捍卫修院的自由、自治及拒绝政府在修院设政治课的要求而被捕。1963年4月5日，教宗任命范廷颂为天主教北宁教区主教，同年8月15日就任；其牧铭为「我信天主的爱」。由于范廷颂被越南政府软禁差不多30年，因此他无法到所属堂区进行牧灵工作而专注研读等工作。范廷颂除了面对战争、贫困、被当局迫害天主教会等问题外，也秘密恢复修院、创建女修会团体等。1990年，教宗若望保禄二世在同年6月18日擢升范廷颂为天主教河内总教区宗座署理以填补该教区总主教的空缺。1994年3月23日，范廷颂被教宗若望保禄二世擢升为天主教河内总教区总主教并兼天主教谅山教区宗座署理；同年11月26日，若望保禄二世擢升范廷颂为枢机。范廷颂在1995年至2001年期间出任天主教越南主教团主席。2003年4月26日，教宗若望保禄二世任命天主教谅山教区兼天主教高平教区吴光杰主教为天主教河内总教区署理主教；及至2005年2月19日，范廷颂因获批辞去总主教职务而荣休；吴光杰同日真除天主教河内总教区总主教职务。范廷颂于2009年2月22日清晨在河内离世，享年89岁；其葬礼于同月26日上午在天主教河内总教区总主教座堂举行。',\n",
    " 'question': '范廷颂是什么时候被任为主教的？',\n",
    " 'answers': {'text': ['1963年'], 'answer_start': [30]}}\n",
    "\n",
    "# Toy reproduction of what Dataset.map(batched=True) does: it turns a list of\n",
    "# row dicts into a dict of column lists — the shape of `examples` in process_func.\n",
    "datasets_test  = [\n",
    "    {\"id\":\"TRAIN_186_QUERY_0\",\n",
    "       \"context\":\"context1\",\n",
    "       \"questions\":\"aaaa1\",\n",
    "       \"answer\":{'text': ['1961年'], 'answer_start': [10]}},\n",
    "    {\"id\":\"TRAIN_186_QUERY_1\",\n",
    "       \"context\":\"context2\",\n",
    "       \"questions\":\"aaaa2\",\n",
    "       \"answer\":{'text': ['1962年'], 'answer_start': [20]}},\n",
    "    {\"id\":\"TRAIN_186_QUERY_2\",\n",
    "       \"context\":\"context3\",\n",
    "       \"questions\":\"aaaa3\",\n",
    "       \"answer\":{'text': ['1963年'], 'answer_start': [30]}}\n",
    "       ]\n",
    "print([i[\"id\"] for i in datasets_test])\n",
    "examples_test = {\n",
    "    \"id\":[i[\"id\"] for i in datasets_test],\n",
    "    \"questions\":[i[\"questions\"] for i in datasets_test],\n",
    "    \"answer\":[i[\"answer\"] for i in datasets_test]\n",
    "}\n",
    "\n",
    "examples_test"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#  数据映射"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def process_func(examples):\n",
    "    \"\"\"Tokenize a batch of QA examples and attach start/end token labels.\n",
    "\n",
    "    examples: dict of columns ('id', 'context', 'question', 'answers') as\n",
    "    supplied by Dataset.map(batched=True).\n",
    "    Returns the BatchEncoding with 'start_positions'/'end_positions' added;\n",
    "    answers that fall outside the truncated window are labeled (0, 0).\n",
    "    \"\"\"\n",
    "    tokenizer_examples = tokenizer(\n",
    "        text=examples[\"question\"],\n",
    "        text_pair=examples[\"context\"],\n",
    "        return_offsets_mapping=True,\n",
    "        max_length=512,\n",
    "        padding=\"max_length\",\n",
    "        truncation=\"only_second\"\n",
    "    )\n",
    "    offset_mappings = tokenizer_examples.pop('offset_mapping')\n",
    "    start_positions = []\n",
    "    end_positions = []\n",
    "    for idx, offset in enumerate(offset_mappings):\n",
    "        # offsets and examples are index-aligned\n",
    "        answer = examples['answers'][idx]\n",
    "        # answer character span; answers has keys ('text', 'answer_start')\n",
    "        start_char = answer['answer_start'][0]\n",
    "        end_char = start_char + len(answer['text'][0])\n",
    "        sequence_ids = tokenizer_examples.sequence_ids(idx)\n",
    "        # first context token (sequence id == 1)\n",
    "        token_start = sequence_ids.index(1)\n",
    "        # last context token = position just before the first None after the context ([SEP]/padding)\n",
    "        token_end = sequence_ids.index(None, token_start) - 1\n",
    "\n",
    "        # Label (0, 0) when the answer is not fully inside the truncated window.\n",
    "        # BUGFIX: the old check `offset[token_start][0] > end_char and\n",
    "        # offset[token_end][1] < start_char` can never be true, so truncated\n",
    "        # answers previously got garbage labels instead of (0, 0).\n",
    "        if offset[token_start][0] > start_char or offset[token_end][1] < end_char:\n",
    "            start_token_pos = 0\n",
    "            end_token_pos = 0\n",
    "        else:\n",
    "            # walk right to the first token whose span reaches start_char\n",
    "            token_id = token_start\n",
    "            while token_id <= token_end and offset[token_id][0] < start_char:\n",
    "                token_id += 1\n",
    "            start_token_pos = token_id\n",
    "            # walk left to the last token whose span stays within end_char\n",
    "            token_id = token_end\n",
    "            while token_id >= token_start and offset[token_id][1] > end_char:\n",
    "                token_id -= 1\n",
    "            end_token_pos = token_id\n",
    "        start_positions.append(start_token_pos)\n",
    "        end_positions.append(end_token_pos)\n",
    "\n",
    "    tokenizer_examples[\"start_positions\"] = start_positions\n",
    "    tokenizer_examples[\"end_positions\"] = end_positions\n",
    "    return tokenizer_examples"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Map over every split; batched=True hands process_func a dict of column lists.\n",
    "# The original columns are dropped so only model inputs/labels remain.\n",
    "# (Variable name keeps the original 'tokenied' spelling — later cells reference it.)\n",
    "tokenied_datasets = datasets.map(process_func, batched=True, remove_columns=datasets['train'].column_names)\n",
    "tokenied_datasets"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Inspect one processed training example (input_ids, attention_mask, start/end positions).\n",
    "tokenied_datasets[\"train\"][0]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 加载模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# QA head outputs per-token start/end logits on top of the encoder.\n",
    "model  = AutoModelForQuestionAnswering.from_pretrained(\"/data/models/huggingface/chinese-macbert-base\")\n",
    "# number of labels used by the QA head\n",
    "model.config.num_labels"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 配置 TrainingArguments"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "training_args = TrainingArguments(\n",
    "    output_dir=\"/data/model_for_qa/output\",\n",
    "    per_device_train_batch_size=8,\n",
    "    per_device_eval_batch_size=4,\n",
    "    gradient_accumulation_steps=2,  # effective train batch size 16 per device\n",
    "    eval_strategy='epoch',  # evaluate once per epoch\n",
    "    learning_rate=5e-5,\n",
    "    weight_decay=0.01,\n",
    "    warmup_ratio=0.1,  # linear warmup over the first 10% of steps\n",
    "    # metric_for_best_model='f1',\n",
    "    # load_best_model_at_end=True,\n",
    "    logging_steps=100,\n",
    "    num_train_epochs=5\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 创建 Trainer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import DefaultDataCollator\n",
    "# Features are already padded to max_length, so DefaultDataCollator\n",
    "# (which just stacks them into tensors) is sufficient.\n",
    "trainer = Trainer(\n",
    "    model=model,\n",
    "    args=training_args,\n",
    "    train_dataset=tokenied_datasets['train'],\n",
    "    eval_dataset=tokenied_datasets['validation'],\n",
    "    data_collator=DefaultDataCollator()\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Fine-tune; checkpoints and logs go to output_dir configured above.\n",
    "trainer.train()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 模型预测"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import pipeline\n",
    "# The question-answering pipeline handles tokenization and span decoding;\n",
    "# device=0 runs inference on the first GPU.\n",
    "pipe = pipeline(\"question-answering\",model=model,tokenizer=tokenizer,device=0)\n",
    "pipe(question=\"小明在哪里上的飞机？\",context=\"小明去中国，由于定了到北京的机票，所以先坐车去了大阪，然后从大阪上的飞机，坐了飞机去了北京。\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.19"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
