from transformers import BertTokenizer, BertModel,AutoModel,AutoTokenizer
import torch
import json
import logging
import warnings
from typing import (
    Any,
    Callable,
    Dict,
    List,
    Literal,
    Optional,
    Sequence,
    Set,
    Tuple,
    Union,
)

def cos_sim(a, b):
    """
    Compute the full pairwise cosine-similarity matrix between two batches.

    Non-tensor inputs are converted, 1-D inputs are promoted to a single-row
    batch, and each row is L2-normalized so a plain matrix product yields the
    cosine similarities.

    :return: Matrix with res[i][j] = cos_sim(a[i], b[j])
    """
    normalized = []
    for batch in (a, b):
        if not isinstance(batch, torch.Tensor):
            batch = torch.tensor(batch)
        # Promote a single vector to a batch of one row.
        if batch.dim() == 1:
            batch = batch.unsqueeze(0)
        normalized.append(torch.nn.functional.normalize(batch, p=2, dim=1))
    a_unit, b_unit = normalized
    # Dot products of unit vectors are exactly the cosine similarities.
    return torch.mm(a_unit, b_unit.transpose(0, 1))

# Mean Pooling - Take attention mask into account for correct averaging


# print(model.embeddings.position_embeddings.weight.shape)
# with open('/root/DPR/summary_doc.json') as file:
#     summary_data=json.load(file)
# with open('/root/DPR/document_list.json') as ff:
#         json_data=json.load(ff)
        


# doc=[x['content']+'。'+y['content'] for x,y in zip(summary_data,json_data)]
# doc=[x['content'] for x in json_data]
# doc=[x['content'] for x in summary_data]


# Load model from HuggingFace Hub
# shibing624/text2vec-base-chinese -- recommended for general-purpose Chinese semantic matching
# shibing624/text2vec-base-chinese-sentence -- recommended for Chinese s2s (sentence-vs-sentence) semantic matching
# shibing624/text2vec-base-chinese-paraphrase -- strengthened long-text representation, SOTA on Chinese NLI test sets; recommended for Chinese s2p (sentence-vs-paragraph) matching; 2048-token context
# GanymedeNil/text2vec-large-chinese -- only a 512-token context
model_name='shibing624/text2vec-base-chinese-paraphrase'
# Context window per checkpoint; used to cap the tokenizer's model_max_length
# (unlisted checkpoints fall back to 512 at the lookup site).
model_max_length={'GanymedeNil/text2vec-large-chinese':512,'shibing624/text2vec-base-chinese-paraphrase':2048,'BAAI/bge-large-zh':512
                  ,'BAAI/bge-large-zh-noinstruct':512}



# query=['东北大学的校长是谁？']
# # sentences = ['如何更换花呗绑定银行卡', '花呗更改绑定银行卡','花呗？不如白条！','母猪养育指导手册']
# # Tokenize sentences



# encoded_query= tokenizer(query, padding=False, truncation=True, return_tensors='pt')
# # Compute token embeddings


# # Perform pooling. In this case, mean pooling.

# query_embeddings=mean_pooling(model_query_output, encoded_query['attention_mask'])


# # print("Sentence embeddings:")
# # print(sentence_embeddings)
# # print(len(sentence_embeddings))
# # print([sentence_embeddings[i].norm(1) for i in range(len(sentence_embeddings))])

# # model_query_embeddings=model_query_embeddings.t()

# score=cos_sim(query_embeddings,sentence_embeddings)
# sorted_values, sorted_indices = torch.topk(score, 5, dim=1, largest=True, sorted=True)
# # distance=torch.matmul(sentence_embeddings,model_query_embeddings[0])
# # sorted_values, sorted_indices = torch.sort(distance,descending=True)
# print(list(zip(sorted_values,sorted_indices)))

from torch.utils.data import Dataset,DataLoader
class documentDataset(Dataset):
    """Minimal map-style Dataset over an in-memory sequence of documents."""

    def __init__(self, document) -> None:
        super().__init__()
        # Keep a reference to the caller's sequence; no copying is done.
        self.data = document

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index) -> Any:
        return self.data[index]



        


class Sent2VecEmbeddings():
    """Sentence/document embedder backed by a HuggingFace encoder checkpoint.

    Known checkpoints and their context windows come from the module-level
    ``model_max_length`` table ('GanymedeNil/text2vec-large-chinese': 512,
    'shibing624/text2vec-base-chinese-paraphrase': 2048, 'BAAI/bge-large-zh':
    512); unlisted models (e.g. 'BAAI/bge-large-zh-v1.5',
    'BAAI/bge-large-en-v1.5') fall back to 512 tokens.
    """

    def __init__(self, model_name='BAAI/bge-large-zh-v1.5'):
        """Load tokenizer and model, then move the model to GPU if available."""
        self.model_name = model_name
        # Cap the tokenizer at the checkpoint's context window (default 512).
        self.tokenizer = AutoTokenizer.from_pretrained(model_name, model_max_length=model_max_length.get(model_name, 512))
        # NOTE(review): to_bettertransformer() is deprecated in recent
        # transformers releases (native SDPA replaced it) -- confirm the
        # pinned transformers version still supports this call.
        self.model = AutoModel.from_pretrained(model_name).to_bettertransformer()
        self.device = "cuda:0" if torch.cuda.is_available() else "cpu"
        self.model.to(self.device)

    def my_collate(self, batch):
        """Tokenize a list of raw strings and move the batch to the model device."""
        return self.tokenizer(batch, padding=True, truncation=True, return_tensors='pt').to(self.device)

    def embed_documents(self, texts: List[str]) -> Tuple[torch.Tensor, torch.Tensor]:
        """Embed a corpus of documents in batches.

        :param texts: raw document strings.
        :return: ``(doc_embeddings, max_norm)`` -- a CPU tensor of shape
            (len(texts), hidden_size) plus the largest L2 row norm, which the
            caller can use for an order-preserving rescale.
        """
        dataset = documentDataset(texts)
        # pin_memory stays off: the collate fn already moved the tensors to
        # the GPU, and device tensors cannot be pinned.
        dataloader = DataLoader(dataset, batch_size=256, shuffle=False, num_workers=0,
                                collate_fn=self.my_collate, pin_memory=False)
        print(f'model_name={self.model_name},hidden_size={self.model.config.hidden_size}')
        doc_embeddings = torch.empty(0, self.model.config.hidden_size)
        from tqdm import tqdm
        data_loader_with_progress = tqdm(dataloader, desc="Processing docs into embeddings", ncols=100)
        for encoded_input in data_loader_with_progress:
            with torch.inference_mode():
                model_output = self.model(**encoded_input)
                # Pool on-device, then stage the batch on CPU to keep GPU
                # memory bounded while the full matrix accumulates.
                mean_output = self.mean_pooling(model_output, encoded_input['attention_mask']).to('cpu')
            doc_embeddings = torch.cat((doc_embeddings, mean_output), dim=0)
        maxNorm = torch.max(torch.norm(doc_embeddings, dim=1))
        print(f'doc_embeddings的shape是{doc_embeddings.shape}，doc_embeddings中最大的范数是{maxNorm}')
        # Deliberately NOT calling F.normalize here: per-row normalization is
        # not an order-preserving transform across documents. If norms <= 1
        # are needed, divide everything by maxNorm instead.
        return doc_embeddings, maxNorm

    def _encode(self, text: str):
        """Run the encoder on a single query string.

        Shared by :meth:`embed_query` and :meth:`embed_query_for_multi_turn`,
        which previously duplicated this logic.

        :return: ``(model_output, query_embedding)`` where ``query_embedding``
            is the mean-pooled 1-D vector of the (single) input text.
        """
        encoded_input = self.tokenizer(text, padding=True, truncation=True, return_tensors='pt').to(self.device)
        with torch.inference_mode():
            model_output = self.model(**encoded_input)
        # [0]: a single query yields a batch of one; return the 1-D vector.
        query_embedding = self.mean_pooling(model_output, encoded_input['attention_mask'])[0]
        return model_output, query_embedding

    def embed_query(self, text: str, multiR: bool = False) -> List[float]:
        """Embed a single query string.

        :param text: the query.
        :param multiR: kept for backward compatibility; currently unused.
        :return: the mean-pooled 1-D query embedding (not L2-normalized --
            normalization, if wanted for similarity thresholding, is the
            caller's choice).
        """
        _, query_embedding = self._encode(text)
        return query_embedding

    def embed_query_for_multi_turn(self, text: str):
        """Embed a query while also exposing per-token hidden states.

        :return: ``(last_hidden_state, query_embedding)`` -- the full token
            representations plus the pooled 1-D query vector.
        """
        model_output, query_embedding = self._encode(text)
        return model_output.last_hidden_state, query_embedding

    def mean_pooling(self, model_output, attention_mask):
        """Attention-mask-aware mean pooling over token embeddings.

        Padding positions still carry nonzero hidden states, so they must be
        zeroed via the attention mask before averaging along the sequence
        dimension; otherwise pad tokens would pollute the sentence vector.
        """
        token_embeddings = model_output.last_hidden_state
        # Broadcast the mask (bsz, seq) -> (bsz, seq, hidden) and cast to
        # float so it can scale the embeddings element-wise.
        input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
        # clamp(min=1e-9) guards against division by zero on an all-masked row.
        return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)


# with open('/root/DPR/summary_doc.json') as file:
#     summary_data=json.load(file)
# doc=[x['content'] for x in summary_data]
# s=Sent2VecEmbeddings(model_name=model_name)
# a=s.embed_documents(doc)
# b=s.embed_query('我是东北大学校长')
# print(torch.matmul(a,b))