from datasets import load_dataset
from transformers import AutoTokenizer,AutoModelForQuestionAnswering,TrainingArguments,DefaultDataCollator,Trainer


# Load the CMRC2018 (Chinese machine reading comprehension) dataset from a local path.
datasets = load_dataset('/root/lanyun-tmp/datasets/cmrc2018')
# print(datasets['train'][0])
# Chinese MacBERT tokenizer, loaded from a local checkpoint directory.
tokenizer = AutoTokenizer.from_pretrained("/root/lanyun-tmp/models/chinese-macbert-base")
# Use a small 10-example sample for demonstration.
sample_dataset = datasets['train'].select(range(10))

# BERT requires the input token length not to exceed 512.
# tokenized_examples = tokenizer(text=list(sample_dataset['question']),text_pair=list(sample_dataset['context']))
# print(tokenized_examples['input_ids'][0])
# print(len(tokenized_examples['input_ids'][0]))
# print(list(zip(tokenized_examples['input_ids'][0],tokenized_examples['token_type_ids'][0])))

# offset_mapping records each token's start/end character positions in the original text.
tokenized_examples = tokenizer(text=list(sample_dataset['question']),
                               text_pair=list(sample_dataset['context']),
                               max_length=500,
                               truncation='only_second',# truncate only the second text (the context), never the question
                               padding='max_length',
                               return_offsets_mapping=True
                               )
print(tokenized_examples['input_ids'][0])
print(len(tokenized_examples['input_ids'][0]))
print(tokenized_examples['offset_mapping'][0])
print(len(tokenized_examples['offset_mapping'][0]))
# Each (start, end) pair marks character positions in the original string (left-closed, right-open).
offset_mapping = tokenized_examples.pop("offset_mapping")

# 找到答案在文本中的位置
# Map each answer's character span to its token-level (start, end) positions.
for idx, offset in enumerate(offset_mapping):
    answer = sample_dataset[idx]["answers"]
    start_char = answer["answer_start"][0]
    # end_char is exclusive: the first character position after the answer text.
    end_char = start_char + len(answer["text"][0])

    # Token span of the context (sequence id 1). Compute sequence_ids once per
    # example instead of twice; the trailing [SEP]/padding tokens have id None,
    # so index(None, context_start) finds the first token past the context.
    sequence_ids = tokenized_examples.sequence_ids(idx)
    context_start = sequence_ids.index(1)
    context_end = sequence_ids.index(None, context_start) - 1

    # If the answer was truncated out of the (possibly shortened) context,
    # label the example with (0, 0) — the [CLS] token.
    if offset[context_end][1] < start_char or offset[context_start][0] > end_char:
        start_token_pos = 0
        end_token_pos = 0
    else:
        # Walk inward from both ends of the context toward the answer.
        # Fix: use <= / >= with a final -1 / +1 so a character boundary that
        # falls INSIDE a multi-character token still selects that token
        # (the previous strict < / > comparisons skipped it, off by one).
        token_id = context_start
        while token_id <= context_end and offset[token_id][0] <= start_char:
            token_id += 1
        start_token_pos = token_id - 1
        token_id = context_end
        while token_id >= context_start and offset[token_id][1] >= end_char:
            token_id -= 1
        end_token_pos = token_id + 1

    print(start_token_pos, end_token_pos)
    print("token answer decode:",tokenizer.decode(tokenized_examples["input_ids"][idx][start_token_pos: end_token_pos + 1]))

