import numpy as np
import datasets
import evaluate
import json
from datasets import load_dataset
from cmrc_eval import evaluate_cmrc
import collections
from datasets import load_dataset, DatasetDict
from transformers import DefaultDataCollator
from transformers import AutoTokenizer, AutoModelForQuestionAnswering, Trainer, TrainingArguments
import evaluate
from transformers import AutoTokenizer,AutoModelForMultipleChoice,TrainingArguments,Trainer

# Load the dataset via the local builder script, then hold out 10% of the
# training split for evaluation.  NOTE(review): "dataset_creator.py" must sit
# in the working directory — confirm before running elsewhere.
dataset = load_dataset("dataset_creator.py",split="train")
sdataset = dataset.train_test_split(test_size=0.1)
train_dataset = sdataset["train"]
val_dataset = sdataset["test"]
# Tokenizer for chinese-macbert-large, loaded from a local model directory
# (no hub download at runtime).
tokenizer = AutoTokenizer.from_pretrained("/data/models/huggingface/chinese-macbert-large")
def process_function(examples):
    """Tokenize a batch of multiple-choice examples for AutoModelForMultipleChoice.

    Each example is expanded into exactly 5 (context, question + choice) pairs;
    examples with fewer than 5 choices are padded with the placeholder choice
    "不知道" so every example occupies a fixed-size slot of 5 encodings.

    Args:
        examples: a batched `datasets` dict with keys "id", "context"
            (list whose first element is the passage — assumes a one-element
            list, TODO confirm against dataset_creator.py), "question",
            "choice" (list of candidate answers) and "answer".

    Returns:
        dict with tokenizer fields (each a list of per-example lists of 5
        encodings, shape [batch, 5, 512]) plus "labels" (index of the correct
        choice within the original choice list).

    Raises:
        ValueError: if an example has more than 5 choices (the fixed chunking
            below would otherwise silently misalign every later example), or
            if the answer is not among the choices.
    """
    context = []
    question_choice = []
    labels = []
    for idx in range(len(examples['id'])):
        ctx = examples['context'][idx][0]
        question = examples['question'][idx]
        choices = examples['choice'][idx]
        # More than 5 choices would corrupt the fixed-stride regrouping below;
        # fail loudly instead of producing silently wrong labels.
        if len(choices) > 5:
            raise ValueError(f"example {examples['id'][idx]} has {len(choices)} choices; at most 5 are supported")
        # Pad to exactly 5 choices with the placeholder "不知道" in one pass.
        padded_choices = choices + ["不知道"] * (5 - len(choices))
        for choice in padded_choices:
            context.append(ctx)
            question_choice.append(question+" 选项列表:"+choice)
        # Label is the answer's position among the *real* choices; raises
        # ValueError with a clear traceback if the answer is absent.
        labels.append(choices.index(examples["answer"][idx]))
    # Truncate only the context (first sequence) so the question/choice text
    # is always kept intact; pad everything to the fixed 512-token length.
    tokenized_datasets = tokenizer(context,question_choice,truncation="only_first",max_length=512,padding="max_length")
    # Regroup the flat [batch*5] encodings into [batch, 5] — one row of 5
    # candidate encodings per example, as the multiple-choice head expects.
    tokenized_datasets = {k:[v[i:i+5] for i in range(0,len(v),5)] for k,v in tokenized_datasets.items()}
    tokenized_datasets["labels"] = labels
    return tokenized_datasets
# Apply the tokenization to both splits in batches, dropping the raw columns
# so only model-ready fields (input_ids, attention_mask, ..., labels) remain.
tokenized_datasets = sdataset.map(process_function, batched=True, remove_columns=sdataset['train'].column_names)
# Sanity check: print the first two processed training examples.
print(tokenized_datasets["train"][0:2])
