
from dataclasses import dataclass
from functools import partial
from transformers import AutoTokenizer,AutoModel
from torch.utils.data import Dataset,DataLoader
import torch


        

# # print(test_data)
class MyDataset(Dataset):
    """Thin torch ``Dataset`` wrapper over a list of QA records.

    Each record is a dict carrying at least the keys ``question``,
    ``answer`` and ``digests``; ``__getitem__`` returns exactly those
    three fields.
    """

    def __init__(self, trainData):
        self.data = trainData

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        record = self.data[index]
        return {key: record[key] for key in ('question', 'answer', 'digests')}



# The LLM input is the question plus sentence embeddings; the sentence-embedding
# model receives a batch of token tensors and must yield one vector per document.

@dataclass
class DataCollator:
    """Collate QA samples into tokenized inputs for both models.

    Produces, per batch:
      - ``input_ids_for_llm``: tokenized ``question + answer + '</s>'`` strings;
      - ``labels``: tokenized answers left-padded with ``'<unk>'`` placeholders
        (pad positions replaced by -100 so they are ignored by the loss);
      - ``input_ids_for_sent2vec``: one tokenizer output per sample, covering
        that sample's augmented documents.
    """

    # Tokenizer of the causal LM (e.g. Baichuan2).
    llm_tokenizer : AutoTokenizer
    # Tokenizer of the sentence-embedding model (e.g. gte-large-zh).
    sent2vec_tokenizer : AutoTokenizer
    # Truncation length for the LLM inputs/labels.
    max_length:int
    # NOTE(review): kept for interface compatibility; the tokenizer calls
    # below always pass padding=True regardless of this flag.
    padding:bool=True
    def __call__(self, batch):

        question=[x['question'] for x in batch]
        answer=[x['answer'] for x in batch]
        digests=[x['digests'] for x in batch]
        real_inputs=[]
        # print('digests',digests)
        # digests is indexed [i][j][k]: i is the i-th question, j the j-th document;
        # a document may be split across the web page, so document j can have k fragments.
        # We first concatenate the k fragments, then treat the result at the (i, j) level.
        # exitflag=False
        for i in range(len(question)):
            temp=[]
            for j in range(len(digests[i])):
                strNeedConcat=""
                temp_str=""
                for k in range(len(digests[i][j]['datas'])):
                    # Each fragment's text lives under the 'desc' key.
                    fact=digests[i][j]['datas'][k]['desc']
                    temp_str+=f'{fact}'
                    # if k>0:
                    #     print('地震了',i,j,k)
                        
                    #     print(fact)
                    #     exitflag=True
                        
                # Runtime prompt template ("背景"=background, "问题"=question) — do not alter.
                strNeedConcat+=f"背景:\n【{j+1}】{temp_str}\n问题:{question[i]}\n"
                temp.append(strNeedConcat)
            # Samples with no digests at all are silently dropped from real_inputs,
            # so real_inputs may be shorter than the batch. TODO confirm intended.
            if temp !=[]:
                real_inputs.append(temp)
        # if exitflag:
        #     print('digests[i]',digests[i])
        #     print('real_inputs',real_inputs)
        #     exit()
        # print('real_inputs',real_inputs)
        # real_inputs now has shape bsz x num_docs x doc_len (as nested lists of strings).
        input_ids_for_sent2vec=[]
        for i in real_inputs:

            # One tokenizer call per sample: pads that sample's documents to a
            # common length, truncated at the embedding model's 512 limit.
            input_ids_for_sent2vec.append(self.sent2vec_tokenizer(
                i,
                return_tensors="pt",
                truncation=True,
                max_length=512,
                padding=True,
                ))
        # input_ids_for_sent2vec is a list of length bsz; each entry is the
        # tokenized batch of augmented documents for one sample.
        # print('input_ids_for_sent2vec',input_ids_for_sent2vec)
        print('answer',answer)
        print('question',question)
        input_for_llm=[q+a+'</s>' for q,a in zip(question,answer)]
        input_ids_for_llm=self.llm_tokenizer(
            input_for_llm,
            return_tensors="pt",
            truncation=True,
            max_length=self.max_length,
            padding=True,
        )
        print([(len(real_inputs[i]),len(question[i])) for i in range(len(answer))])
        # NOTE(review): this is not right as-is — the "equal length" requirement
        # applies to the tokenized length, not the raw character count.
        question_len=[len(self.llm_tokenizer.encode(q)) for q in question]
        doc_len=max([len(real_inputs[i]) for i in range(len(answer))] )
        print('question_len',question_len)
        # Left-pad each answer with '<unk>' placeholders — presumably so label
        # positions line up with (doc embeddings + question tokens) in the LLM
        # input. NOTE(review): assumes '<unk>' tokenizes to exactly one id each;
        # verify against the LLM tokenizer.
        answer=['<unk>' *(doc_len+question_len[i])+answer[i]+'</s>' for i in range(len(answer))]
        print('input_for_llm',input_for_llm)
        print('answer',answer,len(answer))
        labels=self.llm_tokenizer(
            answer,
            return_tensors="pt",
            truncation=True,
            max_length=self.max_length,
            padding=True,
        ).input_ids
        # Mask pad positions so they contribute nothing to the loss.
        labels[labels == self.llm_tokenizer.pad_token_id] = -100
        
        print('real_inputs',real_inputs)
        # print('answer',answer)
        print('input_ids_for_llm',input_ids_for_llm.input_ids,input_ids_for_llm.input_ids.shape)
        print('labels',labels,labels.shape)
        return {'input_ids_for_llm':input_ids_for_llm,'labels':labels,'input_ids_for_sent2vec':input_ids_for_sent2vec}
        # return []

import json
# with open('/home/lxy/multiR/Fuser/data/train.json','r',encoding='utf-8') as f:
#     train_data=json.load(f)['data']

# with open('/home/lxy/multiR/Fuser/data/dev.json','r',encoding='utf-8') as f:
#     eval_data=json.load(f)['data']

def mean_pooling(model_output, attention_mask):
    """Average each sequence's token embeddings, ignoring padded positions.

    Padding tokens still produce non-zero hidden states, so a plain mean over
    the sequence axis would be contaminated; instead the attention mask zeroes
    them out and the sum is divided by the count of real tokens.

    model_output: object exposing ``last_hidden_state`` of shape
        (batch, seq_len, hidden).
    attention_mask: (batch, seq_len) tensor — 1 for real tokens, 0 for padding.
    Returns a (batch, hidden) tensor of masked mean embeddings.
    """
    hidden = model_output.last_hidden_state
    # Broadcast the mask from (batch, seq_len) to (batch, seq_len, hidden)
    # so it gates every feature dimension; float() enables the arithmetic below.
    mask = attention_mask.unsqueeze(-1).expand(hidden.size()).float()
    summed = torch.sum(hidden * mask, 1)
    # Clamp guards against division by zero when a row is fully masked.
    counts = torch.clamp(mask.sum(1), min=1e-9)
    return summed / counts

 
if __name__ == '__main__':
    # Smoke-test the collator on the first two records of the test split.
    with open('/home/lxy/multiR/Fuser/data/test.json', 'r', encoding='utf-8') as f:
        test_data = json.load(f)['data'][:2]
    test_dataset = MyDataset(test_data)
    # NOTE(review): local model paths — adjust for your environment.
    tokenizer = AutoTokenizer.from_pretrained(
        "/data/lxy/baichuan2-chat-7b", trust_remote_code=True)
    sent2vec_tokenizer = AutoTokenizer.from_pretrained(
        '/data/lxy/thenlper/gte-large-zh', model_max_length=512)
    collator = DataCollator(
        tokenizer, sent2vec_tokenizer=sent2vec_tokenizer,
        max_length=512, padding=True)
    loader = DataLoader(test_dataset, batch_size=2, collate_fn=collator)
    # Iterating the loader drives the collator once per batch; the collator
    # itself prints its intermediate state for inspection.
    for batch in loader:
        print('')
