{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "62b2b45e",
   "metadata": {},
   "source": [
    "## 基于截断策略"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9ccd769e",
   "metadata": {},
   "source": [
    "### step1:导入相关包"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "7ed6fef7",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\31811\\.conda\\envs\\transformers\\lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    }
   ],
   "source": [
    "from datasets import load_dataset,load_from_disk\n",
    "from transformers import AutoTokenizer,AutoModelForQuestionAnswering,TrainingArguments,Trainer,DefaultDataCollator"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "522ca589",
   "metadata": {},
   "source": [
    "### step2:数据集加载"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "63653634",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "DatasetDict({\n",
       "    train: Dataset({\n",
       "        features: ['id', 'context', 'question', 'answers'],\n",
       "        num_rows: 10142\n",
       "    })\n",
       "    validation: Dataset({\n",
       "        features: ['id', 'context', 'question', 'answers'],\n",
       "        num_rows: 3219\n",
       "    })\n",
       "    test: Dataset({\n",
       "        features: ['id', 'context', 'question', 'answers'],\n",
       "        num_rows: 1002\n",
       "    })\n",
       "})"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Load the MRC (machine reading comprehension) dataset from local disk\n",
     "datasets_=load_from_disk(\"data/mrc_data\")\n",
     "datasets_"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "8deb7a89",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'id': 'TRAIN_36_QUERY_1',\n",
       " 'context': 'NGC 6231是一个位于天蝎座的疏散星团，天球座标为赤经16时54分，赤纬-41度48分，视觉观测大小约45角分，亮度约2.6视星等，距地球5900光年。NGC 6231年龄约为三百二十万年，是一个非常年轻的星团，星团内的最亮星是5等的天蝎座 ζ1星。用双筒望远镜或小型望远镜就能看到个别的行星。NGC 6231在1654年被意大利天文学家乔瓦尼·巴蒂斯特·霍迪尔纳（Giovanni Battista Hodierna）以Luminosae的名字首次纪录在星表中，但是未见记载于夏尔·梅西耶的天体列表和威廉·赫歇尔的深空天体目录。这个天体在1678年被爱德蒙·哈雷（I.7）、1745年被夏西亚科斯（Jean-Phillippe Loys de Cheseaux）（9）、1751年被尼可拉·路易·拉卡伊（II.13）分别再次独立发现。',\n",
       " 'question': 'NGC 6231的年龄是多少？',\n",
       " 'answers': {'text': ['约为三百二十万年'], 'answer_start': [88]}}"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Inspect one training example: context, question, and character-level answer span\n",
     "datasets_[\"train\"][13]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b9937438",
   "metadata": {},
   "source": [
    "### step3：数据预处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "2c916b84",
   "metadata": {},
   "outputs": [],
   "source": [
     "# NOTE(review): hardcoded absolute local path — consider making the model dir configurable\n",
     "tokenizer=AutoTokenizer.from_pretrained(r\"D:\\HuggFace_model\\chinese-macbert-base\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "c7aee37a",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'offset_mapping'])"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "\"\"\"Sample data: demonstrate tokenization on the first 10 training examples\"\"\"\n",
     "samples_dataset=datasets_[\"train\"].select(range(10))\n",
     "tokenized_examples=tokenizer(text=samples_dataset[\"question\"],\n",
     "                             text_pair=samples_dataset[\"context\"],\n",
     "                             max_length=512,truncation=\"only_second\",# truncation strategy: only truncate the second segment (the context), never the question\n",
     "                             return_offsets_mapping=True,# offset_mapping maps each token back to its character span in the original text\n",
     "                             padding=\"max_length\"\n",
     "                             ) # token_type_ids separates the segments: 0 = question, 1 = context\n",
     "tokenized_examples.keys()\n",
     "# attention_mask: masks out the padding tokens"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "7a3c564a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[(101, 0), (5745, 0), (2455, 0), (7563, 0), (3221, 0), (784, 0), (720, 0), (3198, 0), (952, 0), (6158, 0), (818, 0), (711, 0), (712, 0), (3136, 0), (4638, 0), (8043, 0), (102, 0), (5745, 1), (2455, 1), (7563, 1), (3364, 1), (3322, 1), (8020, 1), (8024, 1), (8021, 1), (8024, 1), (1760, 1), (1399, 1), (924, 1), (4882, 1), (185, 1), (5735, 1), (4449, 1), (8020, 1), (8021, 1), (8024, 1), (3221, 1), (6632, 1), (1298, 1), (5384, 1), (7716, 1), (1921, 1), (712, 1), (3136, 1), (3364, 1), (3322, 1), (511, 1), (9155, 1), (2399, 1), (6158, 1), (818, 1), (711, 1), (712, 1), (3136, 1), (8039, 1), (8431, 1), (2399, 1), (6158, 1), (3091, 1), (1285, 1), (711, 1), (1921, 1), (712, 1), (3136, 1), (3777, 1), (1079, 1), (2600, 1), (3136, 1), (1277, 1), (2134, 1), (2429, 1), (5392, 1), (4415, 1), (8039, 1), (8447, 1), (2399, 1), (6158, 1), (3091, 1), (1285, 1), (711, 1), (2600, 1), (712, 1), (3136, 1), (8024, 1), (1398, 1), (2399, 1), (2399, 1), (2419, 1), (6158, 1), (3091, 1), (1285, 1), (711, 1), (3364, 1), (3322, 1), (8039, 1), (8170, 1), (2399, 1), (123, 1), (3299, 1), (4895, 1), (686, 1), (511, 1), (5745, 1), (2455, 1), (7563, 1), (754, 1), (9915, 1), (2399, 1), (127, 1), (3299, 1), (8115, 1), (3189, 1), (1762, 1), (6632, 1), (1298, 1), (2123, 1), (2398, 1), (4689, 1), (1921, 1), (712, 1), (3136, 1), (1355, 1), (5683, 1), (3136, 1), (1277, 1), (1139, 1), (4495, 1), (8039, 1), (4997, 1), (2399, 1), (3198, 1), (2970, 1), (1358, 1), (5679, 1), (1962, 1), (3136, 1), (5509, 1), (1400, 1), (8024, 1), (6158, 1), (671, 1), (855, 1), (6632, 1), (1298, 1), (4868, 1), (4266, 1), (2372, 1), (1168, 1), (3777, 1), (1079, 1), (5326, 1), (5330, 1), (1071, 1), (2110, 1), (689, 1), (511, 1), (5745, 1), (2455, 1), (7563, 1), (754, 1), (9211, 1), (2399, 1), (1762, 1), (3777, 1), (1079, 1), (1920, 1), (934, 1), (6887, 1), (7368, 1), (2130, 1), (2768, 1), (4868, 1), (2110, 1), (2110, 1), (689, 1), (511, 1), (5745, 1), (2455, 1), (7563, 1), (754, 1), (8594, 1), (2399, 1), (127, 1), (3299, 1), 
(127, 1), (3189, 1), (1762, 1), (3777, 1), (1079, 1), (4638, 1), (712, 1), (3136, 1), (2429, 1), (1828, 1), (3232, 1), (7195, 1), (8039, 1), (1350, 1), (1400, 1), (6158, 1), (3836, 1), (1168, 1), (1760, 1), (1957, 1), (2207, 1), (2548, 1), (1065, 1), (2109, 1), (1036, 1), (7368, 1), (3302, 1), (1218, 1), (511, 1), (8707, 1), (2399, 1), (807, 1), (8024, 1), (5745, 1), (2455, 1), (7563, 1), (1762, 1), (3777, 1), (1079, 1), (1828, 1), (1277, 1), (1158, 1), (2456, 1), (4919, 1), (3696, 1), (2970, 1), (2521, 1), (704, 1), (2552, 1), (809, 1), (3119, 1), (2159, 1), (1168, 1), (3777, 1), (1079, 1), (6912, 1), (2773, 1), (4638, 1), (7410, 1), (3696, 1), (511, 1), (9258, 1), (2399, 1), (8024, 1), (3791, 1), (6632, 1), (2773, 1), (751, 1), (5310, 1), (3338, 1), (8024, 1), (6632, 1), (1298, 1), (3696, 1), (712, 1), (1066, 1), (1469, 1), (1744, 1), (2456, 1), (6963, 1), (3777, 1), (1079, 1), (8024, 1), (2496, 1), (3198, 1), (2523, 1), (1914, 1), (1921, 1), (712, 1), (3136, 1), (4868, 1), (5466, 1), (782, 1), (1447, 1), (6845, 1), (5635, 1), (6632, 1), (1298, 1), (4638, 1), (1298, 1), (3175, 1), (8024, 1), (852, 1), (5745, 1), (2455, 1), (7563, 1), (793, 1), (4197, 1), (4522, 1), (1762, 1), (3777, 1), (1079, 1), (511, 1), (5422, 1), (2399, 1), (5052, 1), (4415, 1), (1760, 1), (5735, 1), (3307, 1), (2207, 1), (934, 1), (7368, 1), (8039, 1), (2668, 1), (1762, 1), (8779, 1), (2399, 1), (1728, 1), (2932, 1), (1310, 1), (934, 1), (7368, 1), (4638, 1), (5632, 1), (4507, 1), (510, 1), (5632, 1), (3780, 1), (1350, 1), (2867, 1), (5318, 1), (3124, 1), (2424, 1), (1762, 1), (934, 1), (7368, 1), (6392, 1), (3124, 1), (3780, 1), (6440, 1), (4638, 1), (6206, 1), (3724, 1), (5445, 1), (6158, 1), (2936, 1), (511, 1), (9155, 1), (2399, 1), (125, 1), (3299, 1), (126, 1), (3189, 1), (8024, 1), (3136, 1), (2134, 1), (818, 1), (1462, 1), (5745, 1), (2455, 1), (7563, 1), (711, 1), (1921, 1), (712, 1), (3136, 1), (1266, 1), (2123, 1), (3136, 1), (1277, 1), (712, 1), (3136, 1), (8024, 1), (1398, 1), 
(2399, 1), (129, 1), (3299, 1), (8115, 1), (3189, 1), (2218, 1), (818, 1), (8039, 1), (1071, 1), (4288, 1), (7208, 1), (711, 1), (519, 1), (2769, 1), (928, 1), (1921, 1), (712, 1), (4638, 1), (4263, 1), (520, 1), (511, 1), (4507, 1), (754, 1), (5745, 1), (2455, 1), (7563, 1), (6158, 1), (6632, 1), (1298, 1), (3124, 1), (2424, 1), (6763, 1), (4881, 1), (2345, 1), (679, 1), (1914, 1), (8114, 1), (2399, 1), (8024, 1), (1728, 1), (3634, 1), (800, 1), (3187, 1), (3791, 1), (1168, 1), (2792, 1), (2247, 1), (1828, 1), (1277, 1), (6822, 1), (6121, 1), (4288, 1), (4130, 1), (2339, 1), (868, 1), (5445, 1), (683, 1), (3800, 1), (4777, 1), (6438, 1), (5023, 1), (2339, 1), (868, 1), (511, 1), (5745, 1), (2455, 1), (7563, 1), (7370, 1), (749, 1), (7481, 1), (2190, 1), (2773, 1), (751, 1), (510, 1), (6577, 1), (1737, 1), (510, 1), (6158, 1), (2496, 1), (2229, 1), (6833, 1), (2154, 1), (1921, 1), (712, 1), (3136, 1), (833, 1), (5023, 1), (7309, 1), (7579, 1), (1912, 1), (8024, 1), (738, 1), (4908, 1), (2166, 1), (2612, 1), (1908, 1), (934, 1), (7368, 1), (510, 1), (1158, 1), (2456, 1), (1957, 1), (934, 1), (833, 1), (1730, 1), (860, 1), (5023, 1), (511, 1), (8431, 1), (2399, 1), (8024, 1), (3136, 1), (2134, 1), (5735, 1), (3307, 1), (924, 1), (4882, 1), (753, 1), (686, 1), (1762, 1), (1398, 1), (2399, 1), (127, 1), (3299, 1), (8123, 1), (3189, 1), (3091, 1), (1285, 1), (5745, 1), (2455, 1), (7563, 1), (711, 1), (1921, 1), (712, 1), (3136, 1), (3777, 1), (1079, 1), (2600, 1), (3136, 1), (1277, 1), (2134, 1), (2429, 1), (5392, 1), (102, 1)]\n"
     ]
    }
   ],
   "source": [
     "# Pair each input id with its token_type_id: 0 = question tokens, 1 = context tokens\n",
     "print(list(zip(tokenized_examples['input_ids'][0],tokenized_examples[\"token_type_ids\"][0])))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "bcaccda4",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[(0, 0), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12), (12, 13), (13, 14), (14, 15), (0, 0), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12), (12, 13), (13, 14), (14, 15), (15, 16), (16, 17), (17, 18), (18, 19), (19, 20), (20, 21), (21, 22), (22, 23), (23, 24), (24, 25), (25, 26), (26, 27), (27, 28), (28, 29), (29, 30), (30, 34), (34, 35), (35, 36), (36, 37), (37, 38), (38, 39), (39, 40), (40, 41), (41, 45), (45, 46), (46, 47), (47, 48), (48, 49), (49, 50), (50, 51), (51, 52), (52, 53), (53, 54), (54, 55), (55, 56), (56, 57), (57, 58), (58, 59), (59, 60), (60, 61), (61, 62), (62, 63), (63, 67), (67, 68), (68, 69), (69, 70), (70, 71), (71, 72), (72, 73), (73, 74), (74, 75), (75, 76), (76, 77), (77, 78), (78, 79), (79, 80), (80, 81), (81, 82), (82, 83), (83, 84), (84, 85), (85, 86), (86, 87), (87, 91), (91, 92), (92, 93), (93, 94), (94, 95), (95, 96), (96, 97), (97, 98), (98, 99), (99, 100), (100, 101), (101, 105), (105, 106), (106, 107), (107, 108), (108, 110), (110, 111), (111, 112), (112, 113), (113, 114), (114, 115), (115, 116), (116, 117), (117, 118), (118, 119), (119, 120), (120, 121), (121, 122), (122, 123), (123, 124), (124, 125), (125, 126), (126, 127), (127, 128), (128, 129), (129, 130), (130, 131), (131, 132), (132, 133), (133, 134), (134, 135), (135, 136), (136, 137), (137, 138), (138, 139), (139, 140), (140, 141), (141, 142), (142, 143), (143, 144), (144, 145), (145, 146), (146, 147), (147, 148), (148, 149), (149, 150), (150, 151), (151, 152), (152, 153), (153, 154), (154, 155), (155, 156), (156, 157), (157, 158), (158, 159), (159, 163), (163, 164), (164, 165), (165, 166), (166, 167), (167, 168), (168, 169), (169, 170), (170, 171), (171, 172), (172, 173), (173, 174), (174, 175), (175, 176), (176, 177), (177, 178), (178, 179), (179, 180), (180, 181), (181, 182), (182, 186), (186, 187), (187, 188), (188, 189), (189, 190), (190, 191), (191, 
192), (192, 193), (193, 194), (194, 195), (195, 196), (196, 197), (197, 198), (198, 199), (199, 200), (200, 201), (201, 202), (202, 203), (203, 204), (204, 205), (205, 206), (206, 207), (207, 208), (208, 209), (209, 210), (210, 211), (211, 212), (212, 213), (213, 214), (214, 215), (215, 216), (216, 217), (217, 218), (218, 222), (222, 223), (223, 224), (224, 225), (225, 226), (226, 227), (227, 228), (228, 229), (229, 230), (230, 231), (231, 232), (232, 233), (233, 234), (234, 235), (235, 236), (236, 237), (237, 238), (238, 239), (239, 240), (240, 241), (241, 242), (242, 243), (243, 244), (244, 245), (245, 246), (246, 247), (247, 248), (248, 249), (249, 250), (250, 251), (251, 252), (252, 253), (253, 257), (257, 258), (258, 259), (259, 260), (260, 261), (261, 262), (262, 263), (263, 264), (264, 265), (265, 266), (266, 267), (267, 268), (268, 269), (269, 270), (270, 271), (271, 272), (272, 273), (273, 274), (274, 275), (275, 276), (276, 277), (277, 278), (278, 279), (279, 280), (280, 281), (281, 282), (282, 283), (283, 284), (284, 285), (285, 286), (286, 287), (287, 288), (288, 289), (289, 290), (290, 291), (291, 292), (292, 293), (293, 294), (294, 295), (295, 296), (296, 297), (297, 298), (298, 299), (299, 300), (300, 301), (301, 302), (302, 303), (303, 304), (304, 305), (305, 306), (306, 307), (307, 308), (308, 309), (309, 310), (310, 311), (311, 312), (312, 313), (313, 314), (314, 315), (315, 316), (316, 317), (317, 318), (318, 319), (319, 320), (320, 321), (321, 325), (325, 326), (326, 327), (327, 328), (328, 329), (329, 330), (330, 331), (331, 332), (332, 333), (333, 334), (334, 335), (335, 336), (336, 337), (337, 338), (338, 339), (339, 340), (340, 341), (341, 342), (342, 343), (343, 344), (344, 345), (345, 346), (346, 347), (347, 348), (348, 349), (349, 350), (350, 351), (351, 352), (352, 353), (353, 354), (354, 355), (355, 356), (356, 360), (360, 361), (361, 362), (362, 363), (363, 364), (364, 365), (365, 366), (366, 367), (367, 368), (368, 369), (369, 370), 
(370, 371), (371, 372), (372, 373), (373, 374), (374, 375), (375, 376), (376, 377), (377, 378), (378, 379), (379, 380), (380, 381), (381, 382), (382, 383), (383, 384), (384, 385), (385, 386), (386, 387), (387, 388), (388, 390), (390, 391), (391, 392), (392, 393), (393, 394), (394, 395), (395, 396), (396, 397), (397, 398), (398, 399), (399, 400), (400, 401), (401, 402), (402, 403), (403, 404), (404, 405), (405, 406), (406, 407), (407, 408), (408, 409), (409, 410), (410, 411), (411, 412), (412, 413), (413, 414), (414, 415), (415, 416), (416, 417), (417, 418), (418, 419), (419, 420), (420, 421), (421, 422), (422, 424), (424, 425), (425, 426), (426, 427), (427, 428), (428, 429), (429, 430), (430, 431), (431, 432), (432, 433), (433, 434), (434, 435), (435, 436), (436, 437), (437, 438), (438, 439), (439, 440), (440, 441), (441, 442), (442, 443), (443, 444), (444, 445), (445, 446), (446, 447), (447, 448), (448, 449), (449, 450), (450, 451), (451, 452), (452, 453), (453, 454), (454, 455), (455, 456), (456, 457), (457, 458), (458, 459), (459, 460), (460, 461), (461, 462), (462, 463), (463, 464), (464, 465), (465, 466), (466, 467), (467, 468), (468, 469), (469, 470), (470, 471), (471, 472), (472, 473), (473, 474), (474, 475), (475, 476), (476, 477), (477, 478), (478, 479), (479, 480), (480, 481), (481, 482), (482, 483), (483, 484), (484, 485), (485, 486), (486, 487), (487, 488), (488, 489), (489, 490), (490, 491), (491, 492), (492, 493), (493, 494), (494, 495), (495, 499), (499, 500), (500, 501), (501, 502), (502, 503), (503, 504), (504, 505), (505, 506), (506, 507), (507, 508), (508, 509), (509, 510), (510, 511), (511, 512), (512, 513), (513, 514), (514, 516), (516, 517), (517, 518), (518, 519), (519, 520), (520, 521), (521, 522), (522, 523), (523, 524), (524, 525), (525, 526), (526, 527), (527, 528), (528, 529), (529, 530), (530, 531), (531, 532), (532, 533), (533, 534), (0, 0)] 512\n"
     ]
    }
   ],
   "source": [
     "print(tokenized_examples[\"offset_mapping\"][0],len(tokenized_examples[\"offset_mapping\"][0]))\n",
     "# indices 0-15 are the question; the rest is the context"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "fe6298c1",
   "metadata": {},
   "source": [
    "offset（即 offset_mapping）是 token 与原始文本字符位置的“对照表”"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "2a863ed7",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Remove offset_mapping from the encodings: it is only needed to locate answer spans, not as model input\n",
     "offset_mapping=tokenized_examples.pop('offset_mapping')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "8de7f828",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'text': ['1963年'], 'answer_start': [30]} 30 35 17 510 47 48\n",
      "token answer decode： 1963 年\n",
      "{'text': ['1990年被擢升为天主教河内总教区宗座署理'], 'answer_start': [41]} 41 62 15 510 53 70\n",
      "token answer decode： 1990 年 被 擢 升 为 天 主 教 河 内 总 教 区 宗 座 署 理\n",
      "{'text': ['范廷颂于1919年6月15日在越南宁平省天主教发艳教区出生'], 'answer_start': [97]} 97 126 15 510 100 124\n",
      "token answer decode： 范 廷 颂 于 1919 年 6 月 15 日 在 越 南 宁 平 省 天 主 教 发 艳 教 区 出 生\n",
      "{'text': ['1994年3月23日，范廷颂被教宗若望保禄二世擢升为天主教河内总教区总主教并兼天主教谅山教区宗座署理'], 'answer_start': [548]} 548 598 17 510 0 0\n",
      "token answer decode： [CLS]\n",
      "{'text': ['范廷颂于2009年2月22日清晨在河内离世'], 'answer_start': [759]} 759 780 12 510 0 0\n",
      "token answer decode： [CLS]\n",
      "{'text': ['《全美超级模特儿新秀大赛》第十季'], 'answer_start': [26]} 26 42 21 510 47 62\n",
      "token answer decode： 《 全 美 超 级 模 特 儿 新 秀 大 赛 》 第 十 季\n",
      "{'text': ['有前途的新面孔'], 'answer_start': [247]} 247 254 20 510 232 238\n",
      "token answer decode： 有 前 途 的 新 面 孔\n",
      "{'text': ['《Jet》、《东方日报》、《Elle》等'], 'answer_start': [706]} 706 726 20 510 0 0\n",
      "token answer decode： [CLS]\n",
      "{'text': ['售货员'], 'answer_start': [202]} 202 205 18 510 205 207\n",
      "token answer decode： 售 货 员\n",
      "{'text': ['大空翼'], 'answer_start': [84]} 84 87 21 486 105 107\n",
      "token answer decode： 大 空 翼\n"
     ]
    }
   ],
   "source": [
    "for idx,offset in enumerate(offset_mapping):\n",
    "    answer=samples_dataset[idx]['answers']\n",
    "    start_char=answer['answer_start'][0]\n",
    "    end_char=start_char+len(answer['text'][0])\n",
    "    # 定位答案在token中的起始位置和结束位置\n",
    "    # 一种策略，我们要拿到context的起始与结束，然后从左右两侧向答案逼近\n",
    "    context_start=tokenized_examples.sequence_ids(idx).index(1)\n",
    "    \"\"\"\n",
    "    sequence_ids(idx).index(1)：找到列表中第一个值为1的位置，即上下文部分的第一个 token 索引\n",
    "    \"\"\"\n",
    "    context_end=tokenized_examples.sequence_ids(idx).index(None,context_start)-1\n",
    "    \"\"\"\n",
    "    sequence_ids(idx).index(None, context_start)：从 context_start 开始，\n",
    "    找到第一个值为 None 的位置（即上下文结束后的 [SEP] 或 [PAD]）。\n",
    "    减1后得到上下文的最后一个 token 索引（因为 None 是上下文结束后的符号）\n",
    "    \"\"\"\n",
    "    \n",
    "    # 判断答案是否在context中\n",
    "\n",
    "    #原文结束字符索引<答案的开头字符索引  或者  原文起始字符字符>答案结束字符  ===========答案不存在  \n",
    "    if offset[context_end][1] <start_char or offset[context_start][0]>end_char:\n",
    "        start_token_pos=0\n",
    "        end_token_pos=0\n",
    "    \n",
    "    else:    #答案存在\n",
    "        token_id=context_start#直接把token_id放在文章的开始字符\n",
    "        while token_id<=context_end and offset[token_id][0]<start_char:#当token_id没到原文末尾且匹配表还没到答案开头\n",
    "            token_id+=1\n",
    "        start_token_pos=token_id\n",
    "\n",
    "        token_id=context_end#再把token_id放在文章的结尾字符\n",
    "        while token_id>=context_start and offset[token_id][1]>end_char:#当token_id没到文章开头，且匹配表还没到答案末尾\n",
    "            token_id-=1\n",
    "        end_token_pos=token_id\n",
    "    \"\"\"\n",
    "    start_char/end_char 基于原始文本的字符索引（如中文的每个汉字、英文的每个字母），\n",
    "    而 start_token_pos/end_token_pos 基于tokenizer分词后的token索引（如BPE分词的子词、中文的单字/多字组合）。\n",
    "    二者本质是不同“坐标系”，只有在 “1个token严格对应1个字符” 时才会“数值对应”，但实际中这种情况极少。\n",
    "    \"\"\"\n",
    "\n",
    "    print(answer,start_char,end_char,context_start,context_end,start_token_pos,end_token_pos)#字符索引：左闭右开\n",
    "    print(\"token answer decode：\",tokenizer.decode(tokenized_examples[\"input_ids\"][idx][start_token_pos:end_token_pos+1]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "6f04d0d5",
   "metadata": {},
   "outputs": [],
   "source": [
    "def process_func(examples):\n",
    "    tokenized_examples=tokenizer(text=examples[\"question\"],\n",
    "                             text_pair=examples[\"context\"],\n",
    "                             max_length=512,truncation=\"only_second\",#截断策略，只截断文本内容，inputids对应的字在词表映射，\n",
    "                             return_offsets_mapping=True,#通过 offset_mapping 找到这些 token 在原始文本中的具体字符位置\n",
    "                             padding=\"max_length\"\n",
    "                             ) #token_type_ids：区分上下文,在这里0是问题，1是文本内容\n",
    "    offset_mapping=tokenized_examples.pop('offset_mapping')\n",
    "    start_positions=[]\n",
    "    end_positions=[]\n",
    "    for idx,offset in enumerate(offset_mapping):\n",
    "        answer=examples['answers'][idx]\n",
    "        start_char=answer['answer_start'][0]\n",
    "        end_char=start_char+len(answer['text'][0])\n",
    "        # 定位答案在token中的起始位置和结束位置\n",
    "        # 一种策略，我们要拿到context的起始与结束，然后从左右两侧向答案逼近\n",
    "        context_start=tokenized_examples.sequence_ids(idx).index(1)\n",
    "        \"\"\"\n",
    "        sequence_ids(idx).index(1)：找到列表中第一个值为1的位置，即上下文部分的第一个 token 索引\n",
    "        \"\"\"\n",
    "        context_end=tokenized_examples.sequence_ids(idx).index(None,context_start)-1\n",
    "        # \"\"\"\n",
    "        # sequence_ids(idx).index(None, context_start)：从 context_start 开始，\n",
    "        # 找到第一个值为 None 的位置（即上下文结束后的 [SEP] 或 [PAD]）。\n",
    "        # 减1后得到上下文的最后一个 token 索引（因为 None 是上下文结束后的符号）\n",
    "        # \"\"\"\n",
    "            \n",
    "            # 判断答案是否在context中\n",
    "\n",
    "            #原文结束字符索引<答案的开头字符索引  或者  原文起始字符字符>答案结束字符  ===========答案不存在  \n",
    "        if offset[context_end][1] <start_char or offset[context_start][0]>end_char:\n",
    "            start_token_pos=0\n",
    "            end_token_pos=0\n",
    "        \n",
    "        else:    #答案存在\n",
    "            token_id=context_start#直接把token_id放在文章的开始字符\n",
    "            while token_id<=context_end and offset[token_id][0]<start_char:#当token_id没到原文末尾且匹配表还没到答案开头\n",
    "                token_id+=1\n",
    "            start_token_pos=token_id\n",
    "\n",
    "            token_id=context_end#再把token_id放在文章的结尾字符\n",
    "            while token_id>=context_start and offset[token_id][1]>end_char:#当token_id没到文章开头，且匹配表还没到答案末尾\n",
    "                token_id-=1\n",
    "            end_token_pos=token_id\n",
    "        \"\"\"\n",
    "        start_char/end_char 基于原始文本的字符索引（如中文的每个汉字、英文的每个字母），\n",
    "        而 start_token_pos/end_token_pos 基于tokenizer分词后的token索引（如BPE分词的子词、中文的单字/多字组合）。\n",
    "        二者本质是不同“坐标系”，只有在 “1个token严格对应1个字符” 时才会“数值对应”，但实际中这种情况极少。\n",
    "        \"\"\"\n",
    "        start_positions.append(start_token_pos)\n",
    "        end_positions.append(end_token_pos)\n",
    "        tokenized_examples[\"start_positions\"]=start_positions\n",
    "        tokenized_examples[\"end_positions\"]=end_positions\n",
    "    return tokenized_examples   \n",
    "\n",
    "       \n",
    "\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "dd467e46",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "DatasetDict({\n",
       "    train: Dataset({\n",
       "        features: ['input_ids', 'token_type_ids', 'attention_mask', 'start_positions', 'end_positions'],\n",
       "        num_rows: 10142\n",
       "    })\n",
       "    validation: Dataset({\n",
       "        features: ['input_ids', 'token_type_ids', 'attention_mask', 'start_positions', 'end_positions'],\n",
       "        num_rows: 3219\n",
       "    })\n",
       "    test: Dataset({\n",
       "        features: ['input_ids', 'token_type_ids', 'attention_mask', 'start_positions', 'end_positions'],\n",
       "        num_rows: 1002\n",
       "    })\n",
       "})"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Apply preprocessing to all splits; drop the raw text columns, keeping only model inputs + labels\n",
     "tokenized_datasets=datasets_.map(process_func,batched=True,remove_columns=datasets_['train'].column_names)\n",
     "tokenized_datasets"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3f4acddc",
   "metadata": {},
   "source": [
    "### step4:加载模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "2ecea883",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of BertForQuestionAnswering were not initialized from the model checkpoint at D:\\HuggFace_model\\chinese-macbert-base and are newly initialized: ['qa_outputs.bias', 'qa_outputs.weight']\n",
      "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
     ]
    }
   ],
   "source": [
     "# Load MacBERT with a freshly initialized QA head (qa_outputs) — hence the warning about new weights\n",
     "model=AutoModelForQuestionAnswering.from_pretrained(r\"D:\\HuggFace_model\\chinese-macbert-base\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5208be44",
   "metadata": {},
   "source": [
    "### step5:配置训练参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "09efd0a1",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\31811\\.conda\\envs\\transformers\\lib\\site-packages\\transformers\\training_args.py:1494: FutureWarning: `evaluation_strategy` is deprecated and will be removed in version 4.46 of 🤗 Transformers. Use `eval_strategy` instead\n",
      "  warnings.warn(\n"
     ]
    }
   ],
   "source": [
    "args=TrainingArguments(\n",
    "    output_dir=\"moudle_for_QA\",\n",
    "    per_device_train_batch_size=32,\n",
    "    per_device_eval_batch_size=32,\n",
    "    evaluation_strategy=\"epoch\",\n",
    "    save_strategy=\"epoch\",\n",
    "    load_best_model_at_end=True,\n",
    "    logging_steps=50,#每50batch打印一次\n",
    "    num_train_epochs=3\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2c6d7a82",
   "metadata": {},
   "source": [
    "### step6:训练器"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "a641e285",
   "metadata": {},
   "outputs": [],
   "source": [
     "trainer=Trainer(\n",
     "    model=model,# the QA model\n",
     "    args=args,# training arguments\n",
     "    tokenizer=tokenizer,\n",
     "    train_dataset=tokenized_datasets[\"train\"],\n",
     "    eval_dataset=tokenized_datasets[\"validation\"],\n",
     "\n",
     "    # inputs are already padded to max_length, so the default collator (plain tensor stacking) suffices\n",
     "    data_collator=DefaultDataCollator()\n",
     ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "29798796",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  5%|▌         | 50/951 [01:03<18:42,  1.25s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 3.4139, 'grad_norm': 16.848711013793945, 'learning_rate': 4.737118822292324e-05, 'epoch': 0.16}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 11%|█         | 100/951 [02:06<18:02,  1.27s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 1.845, 'grad_norm': 15.035244941711426, 'learning_rate': 4.474237644584648e-05, 'epoch': 0.32}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 16%|█▌        | 150/951 [03:10<16:48,  1.26s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 1.5594, 'grad_norm': 13.738184928894043, 'learning_rate': 4.211356466876972e-05, 'epoch': 0.47}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 21%|██        | 200/951 [04:13<15:59,  1.28s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 1.4183, 'grad_norm': 11.484691619873047, 'learning_rate': 3.9484752891692956e-05, 'epoch': 0.63}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 26%|██▋       | 250/951 [05:16<14:47,  1.27s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 1.339, 'grad_norm': 9.507486343383789, 'learning_rate': 3.6855941114616195e-05, 'epoch': 0.79}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 32%|███▏      | 300/951 [06:20<13:51,  1.28s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 1.3756, 'grad_norm': 9.06861400604248, 'learning_rate': 3.4227129337539433e-05, 'epoch': 0.95}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "                                                 \n",
      " 33%|███▎      | 317/951 [07:50<13:07,  1.24s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'eval_loss': 1.0333220958709717, 'eval_runtime': 69.3352, 'eval_samples_per_second': 46.427, 'eval_steps_per_second': 1.457, 'epoch': 1.0}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 37%|███▋      | 350/951 [08:34<12:44,  1.27s/it]  "
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 1.0675, 'grad_norm': 9.41031551361084, 'learning_rate': 3.159831756046267e-05, 'epoch': 1.1}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 42%|████▏     | 400/951 [09:39<11:51,  1.29s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 1.0058, 'grad_norm': 8.648605346679688, 'learning_rate': 2.8969505783385907e-05, 'epoch': 1.26}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 47%|████▋     | 450/951 [10:43<10:40,  1.28s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 0.9393, 'grad_norm': 12.38996410369873, 'learning_rate': 2.6340694006309152e-05, 'epoch': 1.42}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 53%|█████▎    | 500/951 [11:48<09:54,  1.32s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 0.9498, 'grad_norm': 11.170249938964844, 'learning_rate': 2.3711882229232387e-05, 'epoch': 1.58}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 58%|█████▊    | 550/951 [12:52<08:31,  1.28s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 0.9267, 'grad_norm': 11.401511192321777, 'learning_rate': 2.1083070452155626e-05, 'epoch': 1.74}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 63%|██████▎   | 600/951 [13:56<07:30,  1.28s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 0.9195, 'grad_norm': 12.737837791442871, 'learning_rate': 1.8454258675078864e-05, 'epoch': 1.89}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "                                                 \n",
      " 67%|██████▋   | 634/951 [15:48<06:35,  1.25s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'eval_loss': 1.0393221378326416, 'eval_runtime': 68.3777, 'eval_samples_per_second': 47.077, 'eval_steps_per_second': 1.477, 'epoch': 2.0}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 68%|██████▊   | 650/951 [16:10<06:54,  1.38s/it]  "
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 0.8573, 'grad_norm': 11.764029502868652, 'learning_rate': 1.5825446898002103e-05, 'epoch': 2.05}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 74%|███████▎  | 700/951 [17:13<05:19,  1.27s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 0.6031, 'grad_norm': 10.61959457397461, 'learning_rate': 1.3196635120925343e-05, 'epoch': 2.21}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 79%|███████▉  | 750/951 [18:18<04:18,  1.29s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 0.6202, 'grad_norm': 13.507978439331055, 'learning_rate': 1.056782334384858e-05, 'epoch': 2.37}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 84%|████████▍ | 800/951 [19:21<03:13,  1.28s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 0.6203, 'grad_norm': 14.975358009338379, 'learning_rate': 7.93901156677182e-06, 'epoch': 2.52}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 89%|████████▉ | 850/951 [20:25<02:08,  1.28s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 0.5777, 'grad_norm': 9.80474853515625, 'learning_rate': 5.310199789695059e-06, 'epoch': 2.68}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 95%|█████████▍| 900/951 [21:29<01:05,  1.27s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 0.6292, 'grad_norm': 7.570751190185547, 'learning_rate': 2.6813880126182968e-06, 'epoch': 2.84}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|█████████▉| 950/951 [22:33<00:01,  1.28s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 0.5961, 'grad_norm': 10.553766250610352, 'learning_rate': 5.257623554153523e-08, 'epoch': 3.0}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "                                                 \n",
      "100%|██████████| 951/951 [23:44<00:00,  1.25s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'eval_loss': 1.1618680953979492, 'eval_runtime': 68.285, 'eval_samples_per_second': 47.141, 'eval_steps_per_second': 1.479, 'epoch': 3.0}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 951/951 [23:45<00:00,  1.50s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'train_runtime': 1425.9085, 'train_samples_per_second': 21.338, 'train_steps_per_second': 0.667, 'train_loss': 1.1184304595683026, 'epoch': 3.0}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "TrainOutput(global_step=951, training_loss=1.1184304595683026, metrics={'train_runtime': 1425.9085, 'train_samples_per_second': 21.338, 'train_steps_per_second': 0.667, 'total_flos': 7950215120449536.0, 'train_loss': 1.1184304595683026, 'epoch': 3.0})"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "trainer.train()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "cd44ee82",
   "metadata": {},
   "source": [
    "### 加载模型与使用"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "c69a4d1a",
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import pipeline\n",
    "pipe=pipeline(\"question-answering\",model=\"moudle_for_QA/checkpoint-951\",tokenizer=tokenizer,device=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "956c4c0f",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'score': 0.6518198847770691, 'start': 14, 'end': 19, 'answer': '四川峨眉山'}"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "pipe(question=\"我祖籍是哪里\",context=\"我叫李云，今年二十八岁，来自四川峨眉山\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e981a032",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "transformers",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.23"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
