import numpy as np
from cmrc_eval import evaluate_cmrc
import collections
from datasets import load_dataset, DatasetDict
from transformers import DefaultDataCollator
from transformers import AutoTokenizer, AutoModelForQuestionAnswering, Trainer, TrainingArguments


def process_func(examples):
    """
    Tokenize a batch of QA examples and compute answer-span token labels.

    Each input example has four fields: ``id``, ``question``, ``context``
    and ``answers``. With ``batched=True`` the ``datasets.map`` pipeline
    hands this function ~1000 examples at a time; sliding-window
    tokenization below may split one long example into several features,
    so the number of output features can exceed the number of inputs
    (``overflow_to_sample_mapping`` records feature -> example index).

    Returns the tokenized batch extended with:
      - ``examples_ids``:    original example id for every feature
      - ``start_positions``: answer start token index (0 when the answer
                             is not inside this window)
      - ``end_positions``:   answer end token index (0 likewise)
      - ``offset_mapping``:  rewritten so only context tokens keep their
                             character offsets; question/special/pad
                             positions become None (consumed later by
                             ``get_result`` during prediction decoding).
    """
    tokenized = tokenizer(
        text=examples["question"],
        text_pair=examples["context"],
        return_offsets_mapping=True,
        # Sliding window: overlong contexts are split into extra features.
        return_overflowing_tokens=True,
        # Number of overlapping tokens shared between consecutive windows.
        stride=128,
        max_length=384,
        padding="max_length",
        # Truncate only the context (second sequence), never the question.
        truncation="only_second",
    )
    # Maps each (possibly split) feature back to its source example index.
    sample_mapping = tokenized.pop("overflow_to_sample_mapping")

    start_positions = []
    end_positions = []
    examples_ids = []

    for idx, sample_idx in enumerate(sample_mapping):
        answer = examples["answers"][sample_idx]
        # Character span of the gold answer inside the original context;
        # end_char is exclusive (start + answer text length).
        start_char = answer["answer_start"][0]
        end_char = start_char + len(answer["text"][0])

        # sequence_ids: None for special/pad tokens, 0 for question tokens,
        # 1 for context tokens. Hoisted to a local because the tokenizer
        # rebuilds this list on every call — the original recomputed it
        # once per token in the comprehension below.
        sequence_ids = tokenized.sequence_ids(idx)
        # First and last token indices that belong to the context.
        content_start = sequence_ids.index(1)
        content_end = sequence_ids.index(None, content_start) - 1

        # offset[k] = (char_start, char_end) of token k in its source text.
        offset = tokenized["offset_mapping"][idx]
        if offset[content_end][1] < start_char or offset[content_start][0] > end_char:
            # Answer lies outside this window: point both labels at the
            # CLS token (index 0), the conventional "no answer" target.
            start_token_pos = 0
            end_token_pos = 0
        else:
            # Walk inward from both ends of the context to snap the
            # character span onto token boundaries.
            token_id = content_start
            while token_id <= content_end and offset[token_id][0] < start_char:
                token_id += 1
            start_token_pos = token_id
            token_id = content_end
            while token_id >= content_start and offset[token_id][1] > end_char:
                token_id -= 1
            end_token_pos = token_id

        start_positions.append(start_token_pos)
        end_positions.append(end_token_pos)
        examples_ids.append(examples["id"][sample_idx])

        # For prediction: keep character offsets only for context tokens so
        # get_result() can skip question/special positions via `is None`.
        tokenized["offset_mapping"][idx] = [
            (o if sequence_ids[k] == 1 else None)
            for k, o in enumerate(offset)
        ]

    tokenized["examples_ids"] = examples_ids
    tokenized["start_positions"] = start_positions
    tokenized["end_positions"] = end_positions
    return tokenized


def get_result(start_logits, end_logits, examples, features):
    """
    Decode span logits back into answer strings.

    start_logits / end_logits have shape (num_features, seq_len). Because
    sliding-window tokenization may have split one example into several
    features, every example gathers candidates from all of its features
    and keeps the highest-scoring span.

    Returns (predictions, references): both dicts keyed by example id.
    predictions maps to the best answer text (empty string if no valid
    span was found); references maps to the gold answer texts, or an
    empty string for splits without labels (e.g. the test set).
    """
    predictions = {}
    references = {}

    # Group feature row indices by their originating example id.
    example_to_feature = collections.defaultdict(list)
    for feature_idx, example_id in enumerate(features["examples_ids"]):
        example_to_feature[example_id].append(feature_idx)

    # Consider only the top-20 start/end positions, and reject spans
    # longer than 30 tokens.
    n_best = 20
    max_answer_length = 30

    for example in examples:
        example_id = example["id"]
        context = example["context"]
        candidates = []

        for feat_idx in example_to_feature[example_id]:
            s_logit = start_logits[feat_idx]
            e_logit = end_logits[feat_idx]
            # offset entries are None outside the context (question,
            # special tokens, padding) — see process_func.
            offset = features[feat_idx]["offset_mapping"]
            # Token positions sorted by logit score, best first.
            top_starts = np.argsort(s_logit)[::-1][:n_best].tolist()
            top_ends = np.argsort(e_logit)[::-1][:n_best].tolist()

            for s in top_starts:
                for e in top_ends:
                    # Skip positions outside the context.
                    if offset[s] is None or offset[e] is None:
                        continue
                    # Skip inverted or over-long spans.
                    if e < s or e - s + 1 > max_answer_length:
                        continue
                    candidates.append({
                        "text": context[offset[s][0]:offset[e][1]],
                        "score": s_logit[s] + e_logit[e],
                    })

        # Keep the highest-scoring candidate across all windows; fall back
        # to the empty string when no valid span exists.
        if candidates:
            predictions[example_id] = max(candidates, key=lambda c: c["score"])["text"]
        else:
            predictions[example_id] = ""

        # Unlabeled splits (e.g. test) get an empty-string reference.
        if "answers" in example:
            references[example_id] = example["answers"]["text"]
        else:
            references[example_id] = ""

    return predictions, references


from transformers import BertForQuestionAnswering
from transformers.trainer_utils import EvalPrediction


def compute_metrics(preds):
    """
    Trainer metric hook: decode span logits and score them with the CMRC
    evaluator.

    `preds` is a transformers EvalPrediction: preds[0] holds the model
    outputs, a (start_logits, end_logits) pair where each array has shape
    (num_features, seq_len); preds[1] holds the label ids. The number of
    feature rows identifies which split is being evaluated — the
    validation split during training, otherwise the test split.

    NOTE(review): this dispatch assumes the tokenized validation and test
    splits have different lengths — confirm for new datasets.
    """
    start_logits, end_logits = preds[0]

    # Pick the split whose tokenized feature count matches the logits.
    if start_logits.shape[0] == len(tokenized_datasets["validation"]):
        split = "validation"
    else:
        split = "test"

    # Decode per-example answers (predictions) and gold texts (references);
    # unlabeled splits such as test yield empty-string references.
    predictions, references = get_result(
        start_logits, end_logits, datasets[split], tokenized_datasets[split]
    )
    return evaluate_cmrc(predictions, references)


# ---- Script entry: load data/model, tokenize, and train the QA model ----
# CMRC2018 dataset previously saved with DatasetDict.save_to_disk.
datasets = DatasetDict.load_from_disk('/data/datasets/cmrc2018')
tokenizer = AutoTokenizer.from_pretrained("/data/models/huggingface/chinese-macbert-base")
# Non-batched variant (one example per process_func call), kept for reference:
# tokenized_datasets = datasets.map(process_func, remove_columns=datasets['train'].column_names)
# Batched mapping (~1000 examples per call); raw columns are dropped so only
# the tokenized features produced by process_func remain.
tokenized_datasets = datasets.map(process_func, batched=True, remove_columns=datasets['train'].column_names)
model = AutoModelForQuestionAnswering.from_pretrained("/data/models/huggingface/chinese-macbert-base")
# print(model)
training_args = TrainingArguments(
    output_dir="/data/model_for_qa/output",
    per_device_train_batch_size=2,
    per_device_eval_batch_size=2,
    gradient_accumulation_steps=2,
    # eval_strategy='epoch',
    # Run evaluation every 20 optimizer steps.
    eval_strategy="steps",
    eval_steps=20,
    learning_rate=5e-5,
    weight_decay=0.01,
    warmup_ratio=0.1,
    # metric_for_best_model='f1',
    # load_best_model_at_end=True,
    # Log training metrics every this many steps.
    logging_steps=10,
    num_train_epochs=5
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_datasets['train'],
    eval_dataset=tokenized_datasets['validation'],
    data_collator=DefaultDataCollator(),
    compute_metrics=compute_metrics
)

trainer.train()
#
# Inference demo via the question-answering pipeline, kept for reference:
# from transformers import pipeline
#
# pipe = pipeline("question-answering", model=model, tokenizer=tokenizer, device=0)
# pipe(question="小明在哪里上的飞机？",
#      context="小明去中国，由于定了到北京的机票，所以先坐车去了大阪，然后从大阪上的飞机，坐了飞机去了北京。")
