import os
import evaluate
from datasets import DatasetDict,load_from_disk
from transformers import AutoTokenizer, AutoModelForMultipleChoice, TrainingArguments, Trainer
import copy
import torch
import pathlib

# Checkpoint output directory and local C3 dataset directory,
# both resolved relative to this script's location.
save_model_folder = pathlib.Path(__file__).parent.joinpath('models')
data_folder = pathlib.Path(__file__).parent.joinpath('c3')



# Load the dataset previously saved with ``datasets.save_to_disk``.
c3 = load_from_disk(data_folder.resolve().__str__())
# Keep only rows with a non-empty ``answer`` field.
c3 = c3.filter(lambda x:x['answer'])


# Local path to the pretrained Chinese MacBERT checkpoint.
model_folder = r'D:\models\chinese-macbert-base'
tokenizer = AutoTokenizer.from_pretrained(model_folder)
model = AutoModelForMultipleChoice.from_pretrained(model_folder)

# NOTE(review): this import appears unused — AutoModelForMultipleChoice above
# already resolves this checkpoint to the BERT multiple-choice head.
from transformers.models.bert.modeling_bert import BertForMultipleChoice

# 批量处理
def function_data(data1):
    choices = data1['choice']
    context = [data1['context'][0] for _ in choices]
    question = [data1['question'] + " " + _ for _ in choices]
    
    if len(context)<4:
        for _ in range(0,4-len(context)):
            context.append(data1['context'][0])
            question.append(data1['question'] + " " + "不知道")
    
    res_txt1 = tokenizer(text=context,text_pair=question,truncation="only_first",max_length=256,padding='max_length') # return_tensors='pt'
    # res_txt1_copy = copy.deepcopy(res_txt1)
    res_txt2 = {}
    for k,v in res_txt1.items():
        # v [[256],[256],[256],[256]]        4,256 
        # res_txt2[k] = [v] # 增加batch维度 （1,4,256）
        res_txt2[k] = v # 增加batch维度 （1,4,256）
        # res_txt2[k] = v.reshape(-1,4,256)
    
    # res_txt2['labels']=torch.Tensor([choices.index(data1['answer'])])
    # res_txt2['labels']=[choices.index(data1['answer'])]
    res_txt2['labels']=choices.index(data1['answer'])
    return res_txt2


# 批量处理
# Batched preprocessing: flatten every example into 4 (context, question+choice)
# pairs, tokenize them all in one call, then regroup into (batch, 4, 256).
def function_data_batch(data1):
    """Tokenize a batch of C3 examples for AutoModelForMultipleChoice.

    Args:
        data1: a batch dict with list-valued keys ``context`` (each a list
            of sentences), ``question``, ``choice`` and ``answer``.

    Returns:
        dict of tokenizer fields, each shaped (batch, 4, 256) as nested
        lists, plus ``labels``: per-example index of the correct choice.

    Raises:
        ValueError: if an example's answer is not among its (first 4) choices.
    """
    context_list = []
    question_choice_list = []
    labels = []
    for idx in range(len(data1['context'])):
        # C3 has at most 4 candidates; truncating guards the fixed
        # chunks-of-4 regrouping below — extra candidates would silently
        # misalign every subsequent example with `labels`.
        choices = data1['choice'][idx][:4]
        question = data1['question'][idx]
        answer = data1['answer'][idx]
        # The passage is a list of sentences/dialogue turns; join them.
        ctx = '\n'.join(data1['context'][idx])

        contexts = [ctx for _ in choices]
        questions = [question + " " + c for c in choices]

        # Pad every example to exactly 4 candidates so regrouping stays
        # aligned; filler pairs use a "don't know" answer.
        if len(choices) < 4:
            for _ in range(4 - len(choices)):
                contexts.append(ctx)
                questions.append(question + " " + "不知道")

        labels.append(choices.index(answer))
        context_list.extend(contexts)
        question_choice_list.extend(questions)

    # truncation="only_first" trims only the context side of each pair.
    encoded = tokenizer(
        text=context_list,
        text_pair=question_choice_list,
        truncation="only_first",
        max_length=256,
        padding='max_length',
    )
    # Regroup each flat (batch*4, 256) field into (batch, 4, 256).
    features = {
        k: [v[i:i + 4] for i in range(0, len(v), 4)]
        for k, v in encoded.items()
    }
    features['labels'] = labels
    return features





# Tokenize the whole DatasetDict; drop the raw columns so only model
# inputs (input_ids / attention_mask / token_type_ids) and labels remain.
c4 = c3.map(function_data_batch,batched=True,remove_columns=c3['train'].column_names)




# Peek at one processed example (debugging aid; not used below).
data1 = c4['train'][0]


# Start training.

args = TrainingArguments(
    output_dir=save_model_folder.resolve().__str__(),
    per_device_train_batch_size = 100,
    per_device_eval_batch_size= 100,
    save_strategy="epoch",
    eval_strategy="epoch", # evaluate once per epoch (alternative: "steps")
    # eval_steps=2,
    # metric_for_best_model="f1",
    logging_steps=10,
    num_train_epochs=3,
    save_total_limit=3  # keep only the 3 most recent checkpoints
)

tokenizer_datasets = c4

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=tokenizer_datasets['train'],
    eval_dataset=tokenizer_datasets['validation'],
    tokenizer = tokenizer,
    # compute_metrics=eval_metric,
    # data_collator=DefaultDataCollator(),
    # metric_for_best_model="f1",
    # load_best_model_at_end=True,
)


trainer.train()







'''
单独的数据进行模型测试 (ad-hoc single-example model test, kept below for reference)

'''

# # 转换格式
# res_txt1 = tokenizer(text=[ data1['context'][0] for _ in data1['choice']],text_pair=[ data1['question']+" "+_ for _ in data1['choice']],truncation=True,max_length=256,padding='max_length',return_tensors='pt') # return_tensors='pt'
# res_txt1_copy = copy.deepcopy(res_txt1)

# for k,v in res_txt1_copy.items():
#     res_txt1[k] = v.reshape(-1,4,256)

# res_txt1['labels']=torch.tensor([data1['choice'].index(data1['answer'])])

# # display(res_txt1['input_ids'].shape,res_txt1['labels'])
# r = model(**res_txt1)
# print(r)




