
from dataclasses import dataclass
from functools import partial
from transformers import AutoTokenizer
from torch.utils.data import Dataset,DataLoader
import torch


        

# # print(test_data)
class MyDataset(Dataset):
    """Thin ``Dataset`` wrapper over a list of QA records.

    Each record is expected to carry ``question``, ``answer`` and
    ``digests`` entries; ``__getitem__`` exposes exactly those three fields.
    """

    def __init__(self, trainData):
        self.data = trainData

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        record = self.data[index]
        # Forward only the fields the collator consumes.
        return {key: record[key] for key in ('question', 'answer', 'digests')}



# The LLM input is the question plus sentence vectors.
# The sentence-vector model takes a group of tensors as input (genuinely
# awkward) and must output a group of sentence vectors.

@dataclass
class DataCollator:
    """Collate a batch of QA samples into model-ready inputs.

    For every sample, each digest is flattened into one prompt string
    (background facts followed by the question) and the per-sample prompt
    lists are tokenized with ``sent2vec_tokenizer``.  The raw questions and
    answers are tokenized with ``llm_tokenizer``; padded answer positions
    are replaced with -100 so the LM loss ignores them.
    """

    # Annotations are quoted (lazy) so the class can be created even when
    # transformers is not importable at class-definition time.
    llm_tokenizer: "AutoTokenizer"       # tokenizer of the language model
    sent2vec_tokenizer: "AutoTokenizer"  # tokenizer of the sentence encoder
    max_length: int                      # truncation length for both tokenizers
    padding: bool = True                 # padding mode forwarded to the tokenizers

    def __call__(self, batch):
        """Return ``input_ids_for_llm`` (BatchEncoding of questions),
        ``labels`` (answer id tensor with pad positions set to -100) and
        ``input_ids_for_sent2vec`` (one encoding per sample that has at
        least one digest)."""
        questions = [sample['question'] for sample in batch]
        answers = [sample['answer'] for sample in batch]
        digests = [sample['digests'] for sample in batch]

        # One prompt per digest: background facts + the sample's question.
        real_inputs = []
        for question, sample_digests in zip(questions, digests):
            prompts = []
            for j, digest in enumerate(sample_digests):
                text = "背景：\n"
                for item in digest['datas']:
                    # "[j]" tags every fact with the index of its digest.
                    text += f"[{j}] {item['desc']}\n"
                text += f"问题:{question}\n"
                prompts.append(text)
            # Samples without digests contribute nothing to the sent2vec input.
            if prompts:
                real_inputs.append(prompts)

        input_ids_for_sent2vec = [
            self.sent2vec_tokenizer(
                prompts,
                return_tensors="pt",
                truncation=True,
                max_length=self.max_length,
                padding=self.padding,
            )
            for prompts in real_inputs
        ]

        input_ids_for_llm = self.llm_tokenizer(
            questions,
            return_tensors="pt",
            truncation=True,
            max_length=self.max_length,
            padding=self.padding,
        )

        # Tokenize the answers and take the id tensor itself: the previous
        # code compared the whole BatchEncoding (a dict) to pad_token_id,
        # which evaluated to False and never masked anything.
        labels = self.llm_tokenizer(
            answers,
            return_tensors="pt",
            truncation=True,
            max_length=self.max_length,
            padding=self.padding,
        )['input_ids']
        labels[labels == self.llm_tokenizer.pad_token_id] = -100

        return {
            'input_ids_for_llm': input_ids_for_llm,
            'labels': labels,
            'input_ids_for_sent2vec': input_ids_for_sent2vec,
        }


# import json
# # with open('/home/lxy/multiR/Fuser/data/train.json','r',encoding='utf-8') as f:
# #     train_data=json.load(f)['data']

# # with open('/home/lxy/multiR/Fuser/data/dev.json','r',encoding='utf-8') as f:
# #     eval_data=json.load(f)['data']

# with open('/home/lxy/multiR/Fuser/data/test.json','r',encoding='utf-8') as f:
#     test_data=json.load(f)['data']
# # train_dataset=MyDataset(train_data)
# # eval_dataset=MyDataset(eval_data)
# test_dataset=MyDataset(test_data)
# tokenizer = AutoTokenizer.from_pretrained("/data/lxy/baichuan2-chat-7b",trust_remote_code=True)

# myDataloader=DataLoader(test_dataset,batch_size=2,collate_fn=DataCollator(tokenizer,tokenizer,512,True))
# for i in myDataloader:
#     print('???',i['input_ids_for_sent2vec'])

