
from dataclasses import dataclass
from functools import partial
from transformers import AutoTokenizer,AutoModel
from torch.utils.data import Dataset,DataLoader
import torch


        

# # print(test_data)
class MyDataset(Dataset):
    """Thin ``Dataset`` wrapper over an in-memory list of QA records.

    Each record is expected to carry at least the keys
    ``'question'``, ``'answer'`` and ``'digests'``; only those three
    are passed through to the consumer.
    """

    def __init__(self, trainData):
        # Keep a reference to the raw record list; no copying or preprocessing.
        self.data = trainData

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        record = self.data[index]
        # Project the record down to exactly the fields the collator consumes.
        return {key: record[key] for key in ('question', 'answer', 'digests')}



# The LLM consumes the question plus the sentence embeddings.
# The sentence encoder takes a batch of token-id tensors and must emit one
# embedding per prompt string (one tokenized group per example).

@dataclass
class DataCollator:
    """Collate QA examples into LLM inputs, label ids, and sentence-encoder inputs.

    Each batch item is a dict with keys ``'question'``, ``'answer'`` and
    ``'digests'``; ``'digests'`` is assumed to be a list of
    ``{'datas': [{'desc': str}, ...]}`` entries — TODO confirm against the
    dataset schema.

    Returns a dict with:
      - ``'input_ids_for_llm'``: tokenized questions (BatchEncoding)
      - ``'labels'``: answer input_ids tensor with pad positions set to -100
      - ``'input_ids_for_sent2vec'``: one BatchEncoding per example, covering
        that example's digest prompts
    """

    # Quoted annotations: dataclass only needs the names, and this keeps the
    # class importable without transformers at annotation-evaluation time.
    llm_tokenizer: "AutoTokenizer"
    sent2vec_tokenizer: "AutoTokenizer"
    max_length: int
    # NOTE(review): this field is never read — each tokenizer call below
    # hard-codes its own padding flag. Kept for interface compatibility.
    padding: bool = True

    def __call__(self, batch):
        questions = [x['question'] for x in batch]
        answers = [x['answer'] for x in batch]
        digests = [x['digests'] for x in batch]

        # Build one prompt per digest: a "背景" (background) section listing
        # the digest's facts, followed by the question.
        real_inputs = []
        for question, digest_list in zip(questions, digests):
            prompts = []
            for j, digest in enumerate(digest_list):
                strNeedConcat = "背景：\n"
                for item in digest['datas']:
                    fact = item['desc']
                    # f'{[j]}' intentionally renders as "[0]", "[1]", ... —
                    # a bracketed index prefix for each fact line.
                    strNeedConcat += f'{[j]} {fact}\n'
                strNeedConcat += f"问题:{question}\n"
                prompts.append(strNeedConcat)
            # Examples with no digests contribute nothing to the sent2vec input.
            if prompts:
                real_inputs.append(prompts)

        # One tokenizer call per example; each yields (num_digests, seq_len).
        input_ids_for_sent2vec = [
            self.sent2vec_tokenizer(
                prompts,
                return_tensors="pt",
                truncation=True,
                max_length=self.max_length,
                padding=True,
            )
            for prompts in real_inputs
        ]

        # NOTE(review): padding=False with return_tensors="pt" only works for
        # batch_size == 1 (or equal-length sequences) — confirm the DataLoader
        # is used with batch_size=1 as in the commented-out driver below.
        input_ids_for_llm = self.llm_tokenizer(
            questions,
            return_tensors="pt",
            truncation=True,
            max_length=self.max_length,
            padding=False,
        )

        labels_enc = self.llm_tokenizer(
            answers,
            return_tensors="pt",
            truncation=True,
            max_length=self.max_length,
            padding=False,
        )
        # BUG FIX: the original masked on the BatchEncoding itself
        # (labels[labels == pad] = -100).  Comparing a dict-like to an int
        # yields False, and UserDict.__setitem__ then silently added a bogus
        # `False` key — the pad masking never happened.  Mask the tensor.
        labels = labels_enc['input_ids']
        labels[labels == self.llm_tokenizer.pad_token_id] = -100

        return {
            'input_ids_for_llm': input_ids_for_llm,
            'labels': labels,
            'input_ids_for_sent2vec': input_ids_for_sent2vec,
        }


import json
# with open('/home/lxy/multiR/Fuser/data/train.json','r',encoding='utf-8') as f:
#     train_data=json.load(f)['data']

# with open('/home/lxy/multiR/Fuser/data/dev.json','r',encoding='utf-8') as f:
#     eval_data=json.load(f)['data']

def mean_pooling(model_output, attention_mask):
    """Attention-mask-weighted mean of the encoder's last hidden states.

    Pad positions still carry nonzero hidden states, so they must be zeroed
    via the attention mask before averaging along the sequence dimension —
    this is why the raw last_hidden_state cannot simply be ``.mean(1)``-ed.

    model_output: object exposing ``.last_hidden_state`` of shape
        (bsz, seq_len, hidden).
    attention_mask: (bsz, seq_len) 0/1 mask aligned with the tokenized input.
    Returns a (bsz, hidden) tensor of sentence embeddings.
    """
    embeddings = model_output.last_hidden_state
    # Broadcast the mask to (bsz, seq_len, hidden); float so it can scale
    # the embeddings in the multiplication below.
    mask = attention_mask.unsqueeze(-1).expand(embeddings.size()).float()
    # Sum over the sequence dimension, counting only unmasked positions.
    summed = (embeddings * mask).sum(1)
    # clamp guards against division by zero for a fully-masked row.
    counts = mask.sum(1).clamp(min=1e-9)
    return summed / counts

 

# with open('/home/lxy/multiR/Fuser/data/test.json','r',encoding='utf-8') as f:
#     test_data=json.load(f)['data'][:2]
# # train_dataset=MyDataset(train_data)
# # eval_dataset=MyDataset(eval_data)
# test_dataset=MyDataset(test_data)
# tokenizer = AutoTokenizer.from_pretrained("/data/lxy/baichuan2-chat-7b",trust_remote_code=True)
# sent2vec_tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-large-zh-v1.5',model_max_length=512)
# sent2vec = AutoModel.from_pretrained('BAAI/bge-large-zh-v1.5',torch_dtype=torch.float16).cuda()
# myDataloader=DataLoader(test_dataset,batch_size=1,collate_fn=DataCollator(tokenizer,sent2vec_tokenizer=sent2vec_tokenizer,max_length=512,padding=True))
# for i in myDataloader:
#     # print('???',i['input_ids_for_sent2vec'])
#     input_ids_for_sent2vec=i['input_ids_for_sent2vec']
#     shit_tensor=torch.rand((1,3,1024)).cuda()
#     with torch.no_grad():
#             doc_embeddings=[]
#             for i in input_ids_for_sent2vec:
#                 i.to('cuda:0')
#                 # i的shape是 文档数量 x 文档长度（pad了）
#                 model_output = sent2vec(**i)
#                 # 这是一句话的输出,但其实本身shape应该是 document_size x 768
#                 mean_output = mean_pooling(model_output, i['attention_mask'])
#             # bsz x document_size x 768
#             doc_embeddings.append(mean_output) 
#             # print(doc_embeddings)
#             # print(len(doc_embeddings))
#             b=torch.stack(doc_embeddings)
#             print(shit_tensor)
#             print(b,b.shape)
#             c= torch.cat((shit_tensor, b), dim=1)
#             print(c,c.shape)

