from datasets import load_dataset, DatasetDict
from transformers import AutoTokenizer, AutoModelForQuestionAnswering, TrainingArguments, Trainer, DefaultDataCollator
from rich import print
import torch
import pathlib

# Path to a DatasetDict previously saved with save_to_disk (MRC = machine
# reading comprehension data); loaded below via DatasetDict.load_from_disk.
data_folder = r'C:\Users\COLORFUL\Desktop\AI_NLP\hgface\3-question_answering\mrc_data'
# Local checkpoint folder of the Chinese MacBERT base model (tokenizer + weights).
model_folder = r'D:\models\chinese-macbert-base'
# Training checkpoints go into a 'models' directory next to this script.
save_model_folder = pathlib.Path(__file__).parent.joinpath('models')
save_model_folder.mkdir(parents=True,exist_ok=True)


# Load the saved splits plus the pretrained tokenizer and span-extraction
# QA model (adds a start/end-logits head on top of the encoder).
datasets = DatasetDict.load_from_disk(data_folder)
tokenizer = AutoTokenizer.from_pretrained(model_folder)
model = AutoModelForQuestionAnswering.from_pretrained(model_folder)
print(type(model))  # sanity check of the resolved model class
# from transformers.models.bert.modeling_bert import BertForQuestionAnswering



'''
Single-example experiment (kept as the commented-out code below)
'''




# sample_dataset = datasets["train"].select(range(1))
# sample_dataset


# tokenized_examples = tokenizer(
#     text=sample_dataset['question'],
#     text_pair = sample_dataset['context'],
#     max_length=300,
#     padding=True,
#     return_offsets_mapping=True,
#     truncation='only_second',
#     return_tensors = 'pt'
# )
# print(list(tokenized_examples.keys()))




# # def function_data(tokenized_examples):

# # 选取第一个
# offset_mapping = tokenized_examples['offset_mapping'][0]
# # print(offset_mapping)

# '''
# offset_mapping
# [
#     (0, 0),
#     (0, 1),
#     (1, 2),
#     (2, 3),
#     (3, 4),
#     (0, 0),
#     (0, 1),
#     (1, 2),
#     (2, 3),
#     (3, 4),
#     (4, 5),
#     (5, 6),
#     (6, 7),
#     (7, 8),
#     (8, 9),
#     (9, 10),
#     (10, 11),
#     (11, 12),
#     (12, 13),
#     (13, 14),
#     (14, 15),
# '''

# sequence_ids = tokenized_examples.sequence_ids(0)
# context_start = sequence_ids.index(1)
# context_end = sequence_ids.index(None,context_start)-1
# print('context_start',context_start,'context_end',context_end)

# # 开始获取对应的 tokenized_examples input_ids,对应的 开始节点和

# answers = sample_dataset['answers'][0]
# start_char = answers['answer_start'][0]       #  30 
# end_char = start_char+len(answers['text'][0])  # 35

# if offset_mapping[context_end][0]<start_char:
#     start_token_id = 0
#     end_token_id = 0
# else:
#     start_token = context_start
#     while 1:
#         if offset_mapping[start_token][0]<start_char:
#             start_token+=1
#         else:
#             start_token_id = start_token
#             break
    
#     end_token = context_end
#     while 1:
#         if offset_mapping[end_token][1]>end_char:
#             end_token-=1
#         else:
#             end_token_id = end_token
#             break
# print('start_token_id',start_token_id,'end_token_id',end_token_id) 
# tokenized_examples['start_positions']=torch.tensor([start_token_id])
# tokenized_examples['end_positions']=torch.tensor([end_token_id])
# print(tokenized_examples)
    
# res = model(
#     input_ids=tokenized_examples['input_ids'],
#     attention_mask=tokenized_examples['attention_mask'],
#     token_type_ids=tokenized_examples['token_type_ids'],
#     start_positions=tokenized_examples['start_positions'],
#     end_positions=tokenized_examples['end_positions']
# )



def function_data(sample_dataset):
    """Tokenize one QA example and attach answer span labels.

    Expects a single example (``datasets.map(..., batched=False)``) with
    keys ``question``, ``context`` and ``answers`` (a dict with parallel
    lists ``text`` and ``answer_start`` in character offsets).

    Returns the tokenizer encoding extended with ``start_positions`` /
    ``end_positions`` token indices; both are 0 (the CLS position) when
    the example has no answer or the answer was truncated away.
    """
    tokenized_examples = tokenizer(
        text=sample_dataset['question'],
        text_pair=sample_dataset['context'],
        max_length=300,
        padding='max_length',
        return_offsets_mapping=True,
        truncation='only_second',  # only ever cut the context, never the question
    )

    # offset_mapping[i] = (char_start, char_end) of token i in its source
    # text; (0, 0) for special tokens.
    offset_mapping = tokenized_examples['offset_mapping']

    # sequence_ids: None = special/padding, 0 = question, 1 = context.
    # Context tokens therefore span [context_start, context_end].
    sequence_ids = tokenized_examples.sequence_ids()
    context_start = sequence_ids.index(1)
    context_end = sequence_ids.index(None, context_start) - 1

    answers = sample_dataset['answers']
    if not answers['answer_start']:
        # Unanswerable example: point both labels at CLS.
        start_token_id = 0
        end_token_id = 0
    else:
        start_char = answers['answer_start'][0]
        end_char = start_char + len(answers['text'][0])

        # Answer lies entirely outside the (possibly truncated) context.
        if (offset_mapping[context_start][0] > end_char
                or offset_mapping[context_end][1] < start_char):
            start_token_id = 0
            end_token_id = 0
        else:
            # First token whose span ends after start_char, i.e. the token
            # that actually contains start_char (a strict `< start_char`
            # test on the token START would skip a token when the answer
            # begins mid-token).
            token = context_start
            while token < context_end and offset_mapping[token][1] <= start_char:
                token += 1
            start_token_id = token

            # Last token whose span starts before end_char, i.e. the token
            # containing the final answer character (testing the token END
            # would drop a token when the answer ends mid-token).
            token = context_end
            while token > context_start and offset_mapping[token][0] >= end_char:
                token -= 1
            end_token_id = token

    # Store plain ints: after collation the labels come out with the
    # standard (batch,) shape instead of (batch, 1).
    tokenized_examples['start_positions'] = start_token_id
    tokenized_examples['end_positions'] = end_token_id
    return tokenized_examples


# Convert every split to model-ready features; the raw text columns are
# removed so only the tokenized fields (and labels) remain.
tokenizer_datasets = datasets.map(function_data,batched=False,remove_columns=datasets["train"].column_names)
# BUG fix: the original printed `tokenizer`; printing the mapped datasets
# is the useful sanity check here (split sizes + feature names).
print(tokenizer_datasets)
    

# Training configuration: checkpoint and evaluate once per epoch, keep at
# most the 3 most recent checkpoints.
args = TrainingArguments(
    output_dir=save_model_folder.resolve().__str__(),
    per_device_train_batch_size = 256,
    per_device_eval_batch_size= 256,
    save_strategy="epoch",
    eval_strategy="epoch", # alternatively "steps" (with eval_steps below)
    # eval_steps=2,
    # metric_for_best_model="f1",
    logging_steps=10,
    num_train_epochs=3,
    save_total_limit=3
)


# Build the Trainer and run fine-tuning. DefaultDataCollator simply
# tensorizes the already-padded features (padding='max_length' above), so
# no dynamic padding collator is needed.
trainer = Trainer(
    model=model,
    args=args,
    train_dataset=tokenizer_datasets['train'],
    eval_dataset=tokenizer_datasets['validation'],
    tokenizer = tokenizer,  # NOTE(review): recent transformers prefer `processing_class=` — confirm installed version
    # compute_metrics=eval_metric,
    data_collator=DefaultDataCollator(),
    # metric_for_best_model="f1",
    # load_best_model_at_end=True,
)


trainer.train()




