{
 "cells": [
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 基于截断策略的机器阅读理解任务实现"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step1 导入相关包"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T02:24:25.188377Z",
     "start_time": "2025-09-09T02:24:18.609461Z"
    }
   },
   "source": [
     "import torch\n",
     "import os\n",
     "from datasets import load_dataset, DatasetDict\n",
     "from transformers import AutoTokenizer, AutoModelForQuestionAnswering, TrainingArguments, Trainer, DefaultDataCollator\n",
     "# Release any cached GPU memory from a previous run and pin this session to GPU 0\n",
     "torch.cuda.empty_cache()\n",
     "torch.cuda.set_device(0)"
   ],
   "outputs": [],
   "execution_count": 1
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step2 数据集加载"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T02:24:26.509730Z",
     "start_time": "2025-09-09T02:24:26.486822Z"
    }
   },
   "source": [
     "# With internet access, load the dataset directly with load_dataset:\n",
     "# datasets = load_dataset(\"hfl/cmrc2018\", cache_dir=\"./data\")\n",
     "# datasets.save_to_disk(\"./mrc_data\")\n",
     "# # Without internet access, load the local copy saved to disk instead:\n",
     "datasets = DatasetDict.load_from_disk(\"mrc_data\")\n",
     "datasets"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "DatasetDict({\n",
       "    train: Dataset({\n",
       "        features: ['id', 'context', 'question', 'answers'],\n",
       "        num_rows: 10142\n",
       "    })\n",
       "    validation: Dataset({\n",
       "        features: ['id', 'context', 'question', 'answers'],\n",
       "        num_rows: 3219\n",
       "    })\n",
       "    test: Dataset({\n",
       "        features: ['id', 'context', 'question', 'answers'],\n",
       "        num_rows: 1002\n",
       "    })\n",
       "})"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 2
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T02:14:34.308263Z",
     "start_time": "2025-09-09T02:14:34.303296Z"
    }
   },
   "source": "datasets[\"train\"][0]",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'id': 'TRAIN_186_QUERY_0',\n",
       " 'context': '范廷颂枢机（，），圣名保禄·若瑟（），是越南罗马天主教枢机。1963年被任为主教；1990年被擢升为天主教河内总教区宗座署理；1994年被擢升为总主教，同年年底被擢升为枢机；2009年2月离世。范廷颂于1919年6月15日在越南宁平省天主教发艳教区出生；童年时接受良好教育后，被一位越南神父带到河内继续其学业。范廷颂于1940年在河内大修道院完成神学学业。范廷颂于1949年6月6日在河内的主教座堂晋铎；及后被派到圣女小德兰孤儿院服务。1950年代，范廷颂在河内堂区创建移民接待中心以收容到河内避战的难民。1954年，法越战争结束，越南民主共和国建都河内，当时很多天主教神职人员逃至越南的南方，但范廷颂仍然留在河内。翌年管理圣若望小修院；惟在1960年因捍卫修院的自由、自治及拒绝政府在修院设政治课的要求而被捕。1963年4月5日，教宗任命范廷颂为天主教北宁教区主教，同年8月15日就任；其牧铭为「我信天主的爱」。由于范廷颂被越南政府软禁差不多30年，因此他无法到所属堂区进行牧灵工作而专注研读等工作。范廷颂除了面对战争、贫困、被当局迫害天主教会等问题外，也秘密恢复修院、创建女修会团体等。1990年，教宗若望保禄二世在同年6月18日擢升范廷颂为天主教河内总教区宗座署理以填补该教区总主教的空缺。1994年3月23日，范廷颂被教宗若望保禄二世擢升为天主教河内总教区总主教并兼天主教谅山教区宗座署理；同年11月26日，若望保禄二世擢升范廷颂为枢机。范廷颂在1995年至2001年期间出任天主教越南主教团主席。2003年4月26日，教宗若望保禄二世任命天主教谅山教区兼天主教高平教区吴光杰主教为天主教河内总教区署理主教；及至2005年2月19日，范廷颂因获批辞去总主教职务而荣休；吴光杰同日真除天主教河内总教区总主教职务。范廷颂于2009年2月22日清晨在河内离世，享年89岁；其葬礼于同月26日上午在天主教河内总教区总主教座堂举行。',\n",
       " 'question': '范廷颂是什么时候被任为主教的？',\n",
       " 'answers': {'text': ['1963年'], 'answer_start': [30]}}"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 3
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step3 数据预处理"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T02:14:36.943848Z",
     "start_time": "2025-09-09T02:14:36.295844Z"
    }
   },
   "source": [
    "tokenizer = AutoTokenizer.from_pretrained(\"hfl/chinese-macbert-base\")\n",
    "tokenizer"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "BertTokenizerFast(name_or_path='hfl/chinese-macbert-base', vocab_size=21128, model_max_length=1000000000000000019884624838656, is_fast=True, padding_side='right', truncation_side='right', special_tokens={'unk_token': '[UNK]', 'sep_token': '[SEP]', 'pad_token': '[PAD]', 'cls_token': '[CLS]', 'mask_token': '[MASK]'}, clean_up_tokenization_spaces=True),  added_tokens_decoder={\n",
       "\t0: AddedToken(\"[PAD]\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t100: AddedToken(\"[UNK]\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t101: AddedToken(\"[CLS]\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t102: AddedToken(\"[SEP]\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "\t103: AddedToken(\"[MASK]\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
       "}"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 4
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T02:14:39.452874Z",
     "start_time": "2025-09-09T02:14:39.448378Z"
    }
   },
   "source": "sample_dataset = datasets[\"train\"].select(range(10))",
   "outputs": [],
   "execution_count": 5
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T02:14:41.076569Z",
     "start_time": "2025-09-09T02:14:41.064560Z"
    }
   },
   "source": [
     "# Encode question + context pairs. truncation = \"only_second\" trims only the context;\n",
     "# return_offsets_mapping gives each token's (start, end) character span for answer alignment.\n",
     "tokenized_examples = tokenizer(text = sample_dataset[\"question\"],\n",
     "\ttext_pair = sample_dataset[\"context\"],\n",
     "\treturn_offsets_mapping = True,\n",
     "\tmax_length = 256, truncation = \"only_second\", padding = \"max_length\")\n",
     "tokenized_examples.keys()"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'offset_mapping'])"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 6
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T02:14:42.418689Z",
     "start_time": "2025-09-09T02:14:42.414708Z"
    }
   },
   "source": [
    "print(tokenized_examples[\"offset_mapping\"][0], len(tokenized_examples[\"offset_mapping\"][0]))"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[(0, 0), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12), (12, 13), (13, 14), (14, 15), (0, 0), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12), (12, 13), (13, 14), (14, 15), (15, 16), (16, 17), (17, 18), (18, 19), (19, 20), (20, 21), (21, 22), (22, 23), (23, 24), (24, 25), (25, 26), (26, 27), (27, 28), (28, 29), (29, 30), (30, 34), (34, 35), (35, 36), (36, 37), (37, 38), (38, 39), (39, 40), (40, 41), (41, 45), (45, 46), (46, 47), (47, 48), (48, 49), (49, 50), (50, 51), (51, 52), (52, 53), (53, 54), (54, 55), (55, 56), (56, 57), (57, 58), (58, 59), (59, 60), (60, 61), (61, 62), (62, 63), (63, 67), (67, 68), (68, 69), (69, 70), (70, 71), (71, 72), (72, 73), (73, 74), (74, 75), (75, 76), (76, 77), (77, 78), (78, 79), (79, 80), (80, 81), (81, 82), (82, 83), (83, 84), (84, 85), (85, 86), (86, 87), (87, 91), (91, 92), (92, 93), (93, 94), (94, 95), (95, 96), (96, 97), (97, 98), (98, 99), (99, 100), (100, 101), (101, 105), (105, 106), (106, 107), (107, 108), (108, 110), (110, 111), (111, 112), (112, 113), (113, 114), (114, 115), (115, 116), (116, 117), (117, 118), (118, 119), (119, 120), (120, 121), (121, 122), (122, 123), (123, 124), (124, 125), (125, 126), (126, 127), (127, 128), (128, 129), (129, 130), (130, 131), (131, 132), (132, 133), (133, 134), (134, 135), (135, 136), (136, 137), (137, 138), (138, 139), (139, 140), (140, 141), (141, 142), (142, 143), (143, 144), (144, 145), (145, 146), (146, 147), (147, 148), (148, 149), (149, 150), (150, 151), (151, 152), (152, 153), (153, 154), (154, 155), (155, 156), (156, 157), (157, 158), (158, 159), (159, 163), (163, 164), (164, 165), (165, 166), (166, 167), (167, 168), (168, 169), (169, 170), (170, 171), (171, 172), (172, 173), (173, 174), (174, 175), (175, 176), (176, 177), (177, 178), (178, 179), (179, 180), (180, 181), (181, 182), (182, 186), (186, 187), (187, 188), (188, 189), (189, 190), (190, 191), (191, 
192), (192, 193), (193, 194), (194, 195), (195, 196), (196, 197), (197, 198), (198, 199), (199, 200), (200, 201), (201, 202), (202, 203), (203, 204), (204, 205), (205, 206), (206, 207), (207, 208), (208, 209), (209, 210), (210, 211), (211, 212), (212, 213), (213, 214), (214, 215), (215, 216), (216, 217), (217, 218), (218, 222), (222, 223), (223, 224), (224, 225), (225, 226), (226, 227), (227, 228), (228, 229), (229, 230), (230, 231), (231, 232), (232, 233), (233, 234), (234, 235), (235, 236), (236, 237), (237, 238), (238, 239), (239, 240), (240, 241), (241, 242), (242, 243), (243, 244), (244, 245), (245, 246), (246, 247), (247, 248), (248, 249), (249, 250), (250, 251), (251, 252), (252, 253), (253, 257), (257, 258), (258, 259), (259, 260), (260, 261), (261, 262), (262, 263), (263, 264), (264, 265), (265, 266), (0, 0)] 256\n"
     ]
    }
   ],
   "execution_count": 7
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T02:14:44.137876Z",
     "start_time": "2025-09-09T02:14:44.134397Z"
    }
   },
   "source": [
    "offset_mapping = tokenized_examples.pop(\"offset_mapping\")"
   ],
   "outputs": [],
   "execution_count": 8
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T02:14:48.236090Z",
     "start_time": "2025-09-09T02:14:48.228793Z"
    }
   },
   "source": [
    "for idx, offset in enumerate(offset_mapping):\n",
    "\tanswer = sample_dataset[idx][\"answers\"]\n",
    "\tstart_char = answer[\"answer_start\"][0]\n",
    "\tend_char = start_char + len(answer[\"text\"][0])\n",
    "\t# 定位答案在token中的起始位置和结束位置\n",
    "\t# 一种策略，我们要拿到context的起始和结束，然后从左右两侧向答案逼近\n",
    "\n",
    "\tcontext_start = tokenized_examples.sequence_ids(idx).index(1)\n",
    "\tcontext_end = tokenized_examples.sequence_ids(idx).index(None, context_start) - 1\n",
    "\n",
    "\t# 判断答案是否在context中\n",
    "\tif offset[context_end][1] < start_char or offset[context_start][0] > end_char:\n",
    "\t\tstart_token_pos = 0\n",
    "\t\tend_token_pos = 0\n",
    "\telse:\n",
    "\t\ttoken_id = context_start\n",
    "\t\twhile token_id <= context_end and offset[token_id][0] < start_char:\n",
    "\t\t\ttoken_id += 1\n",
    "\t\tstart_token_pos = token_id\n",
    "\t\ttoken_id = context_end\n",
    "\t\twhile token_id >= context_start and offset[token_id][1] > end_char:\n",
    "\t\t\ttoken_id -= 1\n",
    "\t\tend_token_pos = token_id\n",
    "\n",
    "\tprint(answer, start_char, end_char, context_start, context_end, start_token_pos, end_token_pos)\n",
    "\tprint(\"token answer decode:\",\n",
    "\t\ttokenizer.decode(tokenized_examples[\"input_ids\"][idx][start_token_pos: end_token_pos + 1]))"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'text': ['1963年'], 'answer_start': [30]} 30 35 17 254 47 48\n",
      "token answer decode: 1963 年\n",
      "{'text': ['1990年被擢升为天主教河内总教区宗座署理'], 'answer_start': [41]} 41 62 15 254 53 70\n",
      "token answer decode: 1990 年 被 擢 升 为 天 主 教 河 内 总 教 区 宗 座 署 理\n",
      "{'text': ['范廷颂于1919年6月15日在越南宁平省天主教发艳教区出生'], 'answer_start': [97]} 97 126 15 254 100 124\n",
      "token answer decode: 范 廷 颂 于 1919 年 6 月 15 日 在 越 南 宁 平 省 天 主 教 发 艳 教 区 出 生\n",
      "{'text': ['1994年3月23日，范廷颂被教宗若望保禄二世擢升为天主教河内总教区总主教并兼天主教谅山教区宗座署理'], 'answer_start': [548]} 548 598 17 254 0 0\n",
      "token answer decode: [CLS]\n",
      "{'text': ['范廷颂于2009年2月22日清晨在河内离世'], 'answer_start': [759]} 759 780 12 254 0 0\n",
      "token answer decode: [CLS]\n",
      "{'text': ['《全美超级模特儿新秀大赛》第十季'], 'answer_start': [26]} 26 42 21 254 47 62\n",
      "token answer decode: 《 全 美 超 级 模 特 儿 新 秀 大 赛 》 第 十 季\n",
      "{'text': ['有前途的新面孔'], 'answer_start': [247]} 247 254 20 254 232 238\n",
      "token answer decode: 有 前 途 的 新 面 孔\n",
      "{'text': ['《Jet》、《东方日报》、《Elle》等'], 'answer_start': [706]} 706 726 20 254 0 0\n",
      "token answer decode: [CLS]\n",
      "{'text': ['售货员'], 'answer_start': [202]} 202 205 18 254 205 207\n",
      "token answer decode: 售 货 员\n",
      "{'text': ['大空翼'], 'answer_start': [84]} 84 87 21 254 105 107\n",
      "token answer decode: 大 空 翼\n"
     ]
    }
   ],
   "execution_count": 10
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T02:14:51.009802Z",
     "start_time": "2025-09-09T02:14:51.004720Z"
    }
   },
   "source": [
    "def process_func(examples):\n",
    "\ttokenized_examples = tokenizer(text = examples[\"question\"],\n",
    "\t\ttext_pair = examples[\"context\"],\n",
    "\t\treturn_offsets_mapping = True,\n",
    "\t\tmax_length = 384, truncation = \"only_second\", padding = \"max_length\")\n",
    "\toffset_mapping = tokenized_examples.pop(\"offset_mapping\")\n",
    "\tstart_positions = []\n",
    "\tend_positions = []\n",
    "\tfor idx, offset in enumerate(offset_mapping):\n",
    "\t\tanswer = examples[\"answers\"][idx]\n",
    "\t\tstart_char = answer[\"answer_start\"][0]\n",
    "\t\tend_char = start_char + len(answer[\"text\"][0])\n",
    "\t\t# 定位答案在token中的起始位置和结束位置\n",
    "\t\t# 一种策略，我们要拿到context的起始和结束，然后从左右两侧向答案逼近\n",
    "\t\tcontext_start = tokenized_examples.sequence_ids(idx).index(1)\n",
    "\t\tcontext_end = tokenized_examples.sequence_ids(idx).index(None, context_start) - 1\n",
    "\t\t# 判断答案是否在context中\n",
    "\t\tif offset[context_end][1] < start_char or offset[context_start][0] > end_char:\n",
    "\t\t\tstart_token_pos = 0\n",
    "\t\t\tend_token_pos = 0\n",
    "\t\telse:\n",
    "\t\t\ttoken_id = context_start\n",
    "\t\t\twhile token_id <= context_end and offset[token_id][0] < start_char:\n",
    "\t\t\t\ttoken_id += 1\n",
    "\t\t\tstart_token_pos = token_id\n",
    "\t\t\ttoken_id = context_end\n",
    "\t\t\twhile token_id >= context_start and offset[token_id][1] > end_char:\n",
    "\t\t\t\ttoken_id -= 1\n",
    "\t\t\tend_token_pos = token_id\n",
    "\t\tstart_positions.append(start_token_pos)\n",
    "\t\tend_positions.append(end_token_pos)\n",
    "\n",
    "\ttokenized_examples[\"start_positions\"] = start_positions\n",
    "\ttokenized_examples[\"end_positions\"] = end_positions\n",
    "\treturn tokenized_examples"
   ],
   "outputs": [],
   "execution_count": 11
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T02:14:58.914190Z",
     "start_time": "2025-09-09T02:14:53.898538Z"
    }
   },
   "source": [
     "# Map the preprocessing over all splits; drop the raw columns so only model inputs/labels remain\n",
     "tokenied_datasets = datasets.map(process_func, batched = True, remove_columns = datasets[\"train\"].column_names)\n",
     "tokenied_datasets"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Map:   0%|          | 0/10142 [00:00<?, ? examples/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "27e18d662eda44959e56a99437b7e141"
      }
     },
     "metadata": {},
     "output_type": "display_data",
     "jetTransient": {
      "display_id": null
     }
    },
    {
     "data": {
      "text/plain": [
       "Map:   0%|          | 0/3219 [00:00<?, ? examples/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "9e6a0ff919e44b479a4231b4d6f4d305"
      }
     },
     "metadata": {},
     "output_type": "display_data",
     "jetTransient": {
      "display_id": null
     }
    },
    {
     "data": {
      "text/plain": [
       "Map:   0%|          | 0/1002 [00:00<?, ? examples/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "800924cd767e4d2ab0bed53ba32d3fc9"
      }
     },
     "metadata": {},
     "output_type": "display_data",
     "jetTransient": {
      "display_id": null
     }
    },
    {
     "data": {
      "text/plain": [
       "DatasetDict({\n",
       "    train: Dataset({\n",
       "        features: ['input_ids', 'token_type_ids', 'attention_mask', 'start_positions', 'end_positions'],\n",
       "        num_rows: 10142\n",
       "    })\n",
       "    validation: Dataset({\n",
       "        features: ['input_ids', 'token_type_ids', 'attention_mask', 'start_positions', 'end_positions'],\n",
       "        num_rows: 3219\n",
       "    })\n",
       "    test: Dataset({\n",
       "        features: ['input_ids', 'token_type_ids', 'attention_mask', 'start_positions', 'end_positions'],\n",
       "        num_rows: 1002\n",
       "    })\n",
       "})"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 12
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step4 加载模型"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T02:15:02.157541Z",
     "start_time": "2025-09-09T02:15:01.120793Z"
    }
   },
   "source": [
    "model = AutoModelForQuestionAnswering.from_pretrained(\"hfl/chinese-macbert-base\")\n",
    "def ensure_weights_contiguous(model):\n",
    "\t# model.named_parameters(): 获取模型中所有命名参数\n",
    "\tfor name, param in model.named_parameters():\n",
    "\t\t# param.is_contiguous(): 检查参数在内存中是否连续存储\n",
    "\t\tif not param.is_contiguous():\n",
    "\t\t\tprint(f\"Making {name} contiguous.\")\n",
    "\t\t\t# param.data.contiguous(): 创建连续存储版本并替换原数据\n",
    "\t\t\tparam.data = param.data.contiguous()\n",
    "ensure_weights_contiguous(model)"
   ],
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of BertForQuestionAnswering were not initialized from the model checkpoint at hfl/chinese-macbert-base and are newly initialized: ['qa_outputs.bias', 'qa_outputs.weight']\n",
      "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Making bert.encoder.layer.0.attention.self.query.weight contiguous.\n",
      "Making bert.encoder.layer.0.attention.self.key.weight contiguous.\n",
      "Making bert.encoder.layer.0.attention.self.value.weight contiguous.\n",
      "Making bert.encoder.layer.0.attention.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.0.intermediate.dense.weight contiguous.\n",
      "Making bert.encoder.layer.0.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.1.attention.self.query.weight contiguous.\n",
      "Making bert.encoder.layer.1.attention.self.key.weight contiguous.\n",
      "Making bert.encoder.layer.1.attention.self.value.weight contiguous.\n",
      "Making bert.encoder.layer.1.attention.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.1.intermediate.dense.weight contiguous.\n",
      "Making bert.encoder.layer.1.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.2.attention.self.query.weight contiguous.\n",
      "Making bert.encoder.layer.2.attention.self.key.weight contiguous.\n",
      "Making bert.encoder.layer.2.attention.self.value.weight contiguous.\n",
      "Making bert.encoder.layer.2.attention.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.2.intermediate.dense.weight contiguous.\n",
      "Making bert.encoder.layer.2.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.3.attention.self.query.weight contiguous.\n",
      "Making bert.encoder.layer.3.attention.self.key.weight contiguous.\n",
      "Making bert.encoder.layer.3.attention.self.value.weight contiguous.\n",
      "Making bert.encoder.layer.3.attention.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.3.intermediate.dense.weight contiguous.\n",
      "Making bert.encoder.layer.3.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.4.attention.self.query.weight contiguous.\n",
      "Making bert.encoder.layer.4.attention.self.key.weight contiguous.\n",
      "Making bert.encoder.layer.4.attention.self.value.weight contiguous.\n",
      "Making bert.encoder.layer.4.attention.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.4.intermediate.dense.weight contiguous.\n",
      "Making bert.encoder.layer.4.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.5.attention.self.query.weight contiguous.\n",
      "Making bert.encoder.layer.5.attention.self.key.weight contiguous.\n",
      "Making bert.encoder.layer.5.attention.self.value.weight contiguous.\n",
      "Making bert.encoder.layer.5.attention.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.5.intermediate.dense.weight contiguous.\n",
      "Making bert.encoder.layer.5.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.6.attention.self.query.weight contiguous.\n",
      "Making bert.encoder.layer.6.attention.self.key.weight contiguous.\n",
      "Making bert.encoder.layer.6.attention.self.value.weight contiguous.\n",
      "Making bert.encoder.layer.6.attention.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.6.intermediate.dense.weight contiguous.\n",
      "Making bert.encoder.layer.6.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.7.attention.self.query.weight contiguous.\n",
      "Making bert.encoder.layer.7.attention.self.key.weight contiguous.\n",
      "Making bert.encoder.layer.7.attention.self.value.weight contiguous.\n",
      "Making bert.encoder.layer.7.attention.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.7.intermediate.dense.weight contiguous.\n",
      "Making bert.encoder.layer.7.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.8.attention.self.query.weight contiguous.\n",
      "Making bert.encoder.layer.8.attention.self.key.weight contiguous.\n",
      "Making bert.encoder.layer.8.attention.self.value.weight contiguous.\n",
      "Making bert.encoder.layer.8.attention.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.8.intermediate.dense.weight contiguous.\n",
      "Making bert.encoder.layer.8.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.9.attention.self.query.weight contiguous.\n",
      "Making bert.encoder.layer.9.attention.self.key.weight contiguous.\n",
      "Making bert.encoder.layer.9.attention.self.value.weight contiguous.\n",
      "Making bert.encoder.layer.9.attention.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.9.intermediate.dense.weight contiguous.\n",
      "Making bert.encoder.layer.9.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.10.attention.self.query.weight contiguous.\n",
      "Making bert.encoder.layer.10.attention.self.key.weight contiguous.\n",
      "Making bert.encoder.layer.10.attention.self.value.weight contiguous.\n",
      "Making bert.encoder.layer.10.attention.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.10.intermediate.dense.weight contiguous.\n",
      "Making bert.encoder.layer.10.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.11.attention.self.query.weight contiguous.\n",
      "Making bert.encoder.layer.11.attention.self.key.weight contiguous.\n",
      "Making bert.encoder.layer.11.attention.self.value.weight contiguous.\n",
      "Making bert.encoder.layer.11.attention.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.11.intermediate.dense.weight contiguous.\n",
      "Making bert.encoder.layer.11.output.dense.weight contiguous.\n"
     ]
    }
   ],
   "execution_count": 13
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step5 配置TrainingArguments"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T02:15:05.364500Z",
     "start_time": "2025-09-09T02:15:05.266270Z"
    }
   },
   "source": [
     "args = TrainingArguments(\n",
     "\toutput_dir = \"models_for_qa\",\n",
     "\tper_device_train_batch_size = 64,\n",
     "\tper_device_eval_batch_size = 128,\n",
     "\tgradient_accumulation_steps = 4,  # gradient accumulation steps (effective train batch = 64 * 4)\n",
     "\tgradient_checkpointing = True,  # enable gradient checkpointing to save GPU memory\n",
     "\tgradient_checkpointing_kwargs = {\"use_reentrant\": False},  # recommended setting\n",
     "\toptim = \"adafactor\",  # use the Adafactor optimizer\n",
     "\tfp16 = True,  # train in 16-bit floating point\n",
     "\teval_strategy = \"epoch\",\n",
     "\tsave_strategy = \"epoch\",\n",
     "\tlogging_steps = 50,\n",
     "\tnum_train_epochs = 1\n",
     ")"
   ],
   "outputs": [],
   "execution_count": 14
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step6 配置Trainer"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T02:15:09.837699Z",
     "start_time": "2025-09-09T02:15:09.513020Z"
    }
   },
   "source": [
     "trainer = Trainer(\n",
     "\tmodel = model,\n",
     "\targs = args,\n",
     "\t# NOTE(review): newer transformers versions deprecate `tokenizer=` in favor of `processing_class=` — confirm before upgrading\n",
     "\ttokenizer = tokenizer,\n",
     "\ttrain_dataset = tokenied_datasets[\"train\"],\n",
     "\teval_dataset = tokenied_datasets[\"validation\"],\n",
     "\tdata_collator = DefaultDataCollator()\n",
     ")"
   ],
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\86134\\AppData\\Local\\Programs\\Python\\Python312\\Lib\\site-packages\\accelerate\\accelerator.py:488: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead.\n",
      "  self.scaler = torch.cuda.amp.GradScaler(**kwargs)\n"
     ]
    }
   ],
   "execution_count": 15
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step7 模型训练"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T02:22:59.320371Z",
     "start_time": "2025-09-09T02:15:13.318575Z"
    }
   },
   "source": [
    "trainer.train()"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ],
      "text/html": [
       "\n",
       "    <div>\n",
       "      \n",
       "      <progress value='39' max='39' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
       "      [39/39 07:32, Epoch 0/1]\n",
       "    </div>\n",
       "    <table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       " <tr style=\"text-align: left;\">\n",
       "      <th>Epoch</th>\n",
       "      <th>Training Loss</th>\n",
       "      <th>Validation Loss</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <td>0</td>\n",
       "      <td>No log</td>\n",
       "      <td>1.484577</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table><p>"
      ]
     },
     "metadata": {},
     "output_type": "display_data",
     "jetTransient": {
      "display_id": "c66225635e403c1c4c9a3bf0679012e4"
     }
    },
    {
     "data": {
      "text/plain": [
       "TrainOutput(global_step=39, training_loss=2.7764425033178086, metrics={'train_runtime': 463.3324, 'train_samples_per_second': 21.889, 'train_steps_per_second': 0.084, 'total_flos': 1956590114439168.0, 'train_loss': 2.7764425033178086, 'epoch': 0.9811320754716981})"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 16
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step8 模型预测"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T02:23:03.174324Z",
     "start_time": "2025-09-09T02:23:02.297765Z"
    }
   },
   "source": [
     "from transformers import pipeline\n",
     "\n",
     "\n",
     "# Build a QA pipeline around the fine-tuned model; device = 0 runs inference on the first GPU\n",
     "pipe = pipeline(\"question-answering\", model = model, tokenizer = tokenizer, device = 0)"
   ],
   "outputs": [],
   "execution_count": 17
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T02:23:05.229700Z",
     "start_time": "2025-09-09T02:23:05.176675Z"
    }
   },
   "source": "pipe(question = \"小明在哪里上班？\", context = \"小明在北京上班。\")",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'score': 0.3110268712043762, 'start': 3, 'end': 5, 'answer': '北京'}"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 18
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "transformers",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.16"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
