 
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..'))
import numpy as np
import json 
from Clip.excel2Json import ExcelData2Json
from transformers import AutoTokenizer, AutoModel
from sklearn.metrics.pairwise import cosine_similarity
import torch
import os
import logging    
logging.basicConfig(format='%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s',level=logging.DEBUG,datefmt='%Y-%m-%d %H:%M:%S')  

def loadExcel():
    """Load annotation rows from the Excel sheet and build one search sentence per row.

    Returns:
        list[str]: one comma-joined text per row, combining the building name
        and its prompt keywords (the name is not duplicated when the prompt
        already contains it).
    """
    excelData = ExcelData2Json().loadData2Path("Clip/biaozhu.xlsx")
    result = json.loads(excelData)

    sentences = []
    # Iterate rows directly instead of indexing via range(len(...)).
    for item in result:
        name = item['建筑名称']   # building name column
        prompt = item['提示词']   # prompt/keywords column
        # Avoid repeating the name when the prompt already mentions it.
        text = prompt if name in prompt else f'{name}、{prompt}'
        # Normalize the Chinese enumeration mark to a plain comma.
        sentences.append(text.replace('、', ','))
    return sentences

class clipSearchEngine():
    """Sentence-similarity search engine built on a Chinese BERT model.

    Sentences are embedded by mean-pooling the model's last hidden states
    (masked by attention), cached as a .npy file, and queried with cosine
    similarity.
    """

    def __init__(self) -> None:
        dataRoot = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../DownloadRoot/models')

        # modelPath = 'sentence-transformers/bert-base-nli-mean-tokens'
        # modelPath = "ckiplab/bert-base-chinese"
        modelPath = 'hfl/chinese-bert-wwm'

        self.tokenizer = AutoTokenizer.from_pretrained(os.path.join(dataRoot, modelPath))
        self.model = AutoModel.from_pretrained(os.path.join(dataRoot, modelPath))
        # Inference-only engine: disable dropout etc. for deterministic embeddings.
        self.model.eval()

    def compute(self, sentences):
        """Embed *sentences* into mean-pooled vectors.

        Args:
            sentences: list of strings to embed.

        Returns:
            numpy.ndarray of shape (len(sentences), hidden_size).
        """
        # Batch-encode in a single tokenizer call; pads/truncates every
        # sentence to 128 tokens so the batch is rectangular.
        tokens = self.tokenizer(sentences, max_length=128,
                                truncation=True, padding='max_length',
                                return_tensors='pt')

        # Pure inference: skip autograd graph construction to save memory/time.
        with torch.no_grad():
            outputs = self.model(**tokens)

        # Token-level representations: (batch, seq_len, hidden).
        embeddings = outputs.last_hidden_state

        # Broadcast the attention mask over the hidden dimension so padding
        # tokens contribute nothing to the pooled vector.
        attention_mask = tokens['attention_mask']
        mask = attention_mask.unsqueeze(-1).expand(embeddings.size()).float()
        masked_embeddings = embeddings * mask

        # Mean pooling over real (non-padding) tokens; clamp guards against
        # division by zero for a fully-masked row.
        summed = torch.sum(masked_embeddings, 1)
        counts = torch.clamp(mask.sum(1), min=1e-9)
        mean_pooled = summed / counts

        # Convert from PyTorch tensor to numpy array.
        return mean_pooled.detach().numpy()

    def generateFeatureCache(self, sentences, savePath):
        """Compute embeddings for *sentences* and save them to *savePath* (.npy)."""
        features = self.compute(sentences)
        np.save(savePath, features)

    # Backward-compatible alias preserving the original (misspelled) method
    # name so existing callers keep working.
    genderateFeatureCache = generateFeatureCache

    def loadFeatureCache(self, loadPath):
        """Load a previously saved embedding matrix into self.features."""
        self.features = np.load(loadPath)

    def selectNearset(self, input, num):
        """Return indices of the *num* cached sentences most similar to *input*.

        Args:
            input: query string.
            num: number of nearest neighbours to return.

        Returns:
            numpy array of indices into the cached feature matrix, sorted by
            descending cosine similarity.
        """
        inputFeature = self.compute([input])
        similarity = cosine_similarity(
            inputFeature,
            self.features
        )[0]

        # Negate so argsort yields descending-similarity order.
        maxIndices = np.argsort(-similarity)[:num]
        return maxIndices

if __name__ == '__main__':

    engine = clipSearchEngine()
    featurePath = 'Clip/features.npy'

    # Load the annotated sentences once; reused both for cache generation
    # and for printing the matched texts (the original loaded them twice).
    sentences = loadExcel()
    if not os.path.exists(featurePath):
        engine.genderateFeatureCache(sentences, featurePath)
    engine.loadFeatureCache(featurePath)

    # Renamed from `input` to avoid shadowing the builtin.
    query = '欧式古典建筑,黄色外墙'
    maxIndices = engine.selectNearset(query, 5)

    for i in maxIndices:
        print(sentences[i])