from datasets import load_dataset
from transformers import AutoTokenizer,AutoModelForQuestionAnswering,TrainingArguments,DefaultDataCollator,Trainer

# Load the CMRC2018 (Chinese machine reading comprehension) dataset and the
# MacBERT tokenizer, both from local paths.
datasets = load_dataset('/root/lanyun-tmp/datasets/cmrc2018')
tokenizer = AutoTokenizer.from_pretrained("/root/lanyun-tmp/models/chinese-macbert-base")

def process_func(examples):
    """Tokenize a batch of QA examples and attach token-level answer spans.

    Each question is paired with its context (question = sequence 0,
    context = sequence 1); only the context is truncated to fit
    max_length=384.  The character-level answer span is then mapped onto
    token indices via the offset mapping.

    Args:
        examples: batched dataset slice with "question", "context" and
            "answers" columns (CMRC2018-style: ``answers["answer_start"]``
            and ``answers["text"]`` are parallel lists — TODO confirm
            against the dataset schema).

    Returns:
        The tokenized batch with "start_positions" / "end_positions" label
        columns added and "offset_mapping" removed.
    """
    tokenized_examples = tokenizer(text=examples["question"],
                                   text_pair=examples["context"],
                                   return_offsets_mapping=True,
                                   max_length=384,
                                   truncation="only_second",
                                   padding="max_length")
    # The character offsets are only needed here to build labels; remove
    # them so they are not fed to the model.
    offset_mapping = tokenized_examples.pop("offset_mapping")
    start_positions = []
    end_positions = []
    for idx, offset in enumerate(offset_mapping):
        # Only the first listed answer is used as the label.
        answer = examples["answers"][idx]
        start_char = answer["answer_start"][0]
        end_char = start_char + len(answer["text"][0])
        # Locate the answer's start and end positions among the tokens.
        # Strategy: find the first and last context tokens, then close in on
        # the answer from both ends.
        # sequence_ids is None for special tokens, 0 for the question and 1
        # for the context, so the first 1 is the context start and the next
        # None (the trailing [SEP]) marks one past its end.
        context_start = tokenized_examples.sequence_ids(idx).index(1)
        context_end = tokenized_examples.sequence_ids(idx).index(None, context_start) - 1
        # If the answer lies entirely outside the (possibly truncated)
        # context, point both labels at index 0 (the [CLS] token).
        # NOTE(review): an answer only *partially* cut off by truncation is
        # not caught here and receives a clipped span instead of (0, 0) —
        # confirm this is intended.
        if offset[context_end][1] < start_char or offset[context_start][0] > end_char:
            start_token_pos = 0
            end_token_pos = 0
        else:
            # Walk right to the first token whose start offset reaches start_char.
            token_id = context_start
            while token_id <= context_end and offset[token_id][0] < start_char:
                token_id += 1
            start_token_pos = token_id
            # Walk left to the last token whose end offset does not exceed end_char.
            token_id = context_end
            while token_id >= context_start and offset[token_id][1] > end_char:
                token_id -= 1
            end_token_pos = token_id
        start_positions.append(start_token_pos)
        end_positions.append(end_token_pos)

    tokenized_examples["start_positions"] = start_positions
    tokenized_examples["end_positions"] = end_positions
    return tokenized_examples

# Tokenize every split and drop the raw text columns so only model inputs
# (input_ids, attention_mask, labels, ...) remain.
# NOTE(review): remove_columns uses the *train* split's column names for all
# splits — fine as long as every split shares the same schema; verify.
tokenized_examples = datasets.map(process_func, batched=True, remove_columns=datasets['train'].column_names)
model = AutoModelForQuestionAnswering.from_pretrained("/root/lanyun-tmp/models/chinese-macbert-base")

args = TrainingArguments(
    output_dir=r"/root/lanyun-tmp/HuggingFace/output_2",  # output directory
    num_train_epochs=3,                                   # number of training epochs
    per_device_train_batch_size=32,                       # training batch size per device
    per_device_eval_batch_size=32,                        # evaluation batch size per device
    save_total_limit=3,                                   # keep at most this many checkpoints
    save_strategy="epoch",                                # save a checkpoint every epoch
    eval_strategy="epoch",                                # evaluate at the end of every epoch
    load_best_model_at_end=True,                          # reload the best checkpoint when training finishes
    fp16=False,                                           # fp16 mixed precision disabled
    bf16=True,                                            # use bfloat16 mixed precision instead
)

# Assemble the Trainer.  Inputs were already padded to max_length in
# process_func, so DefaultDataCollator (no dynamic padding) is sufficient.
trainer = Trainer(
    model=model,
    args=args,
    tokenizer=tokenizer,
    train_dataset=tokenized_examples['train'],
    eval_dataset=tokenized_examples['validation'],
    data_collator=DefaultDataCollator(),
)

# trainer.train()

from transformers import pipeline
import torch

# Reload a trained checkpoint and run a quick smoke-test inference.
new_model = AutoModelForQuestionAnswering.from_pretrained("/root/lanyun-tmp/HuggingFace/output_2/checkpoint-951")
# Fall back to CPU (-1) when no GPU is available, so the script also runs on
# CPU-only machines instead of crashing on the hard-coded device=0.
device = 0 if torch.cuda.is_available() else -1
pipe = pipeline('question-answering', model=new_model, tokenizer=tokenizer, device=device)
print(pipe(question="你是谁？", context="你好，我是一只小鸟。"))

