from transformers import AutoTokenizer, AutoModel
import torch
import os
import logging
# First we initialize our model and tokenizer:

# Resolve the local download root relative to this file so the script
# works regardless of the current working directory.
dataRoot = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../DownloadRoot/models')
# Build the checkpoint path once instead of repeating the join for the
# tokenizer and the model.
_model_dir = os.path.join(dataRoot, 'sentence-transformers/bert-base-nli-mean-tokens')
tokenizer = AutoTokenizer.from_pretrained(_model_dir)
model = AutoModel.from_pretrained(_model_dir)


def compute():
    """Encode the demo sentences with BERT, mean-pool the token embeddings,
    and print the cosine similarity of the first sentence against the rest.

    Uses the module-level ``tokenizer`` and ``model``. Returns nothing; the
    similarity row is written to stdout.
    """
    sentences = [
        "Yellow Crane Tower",
        "The new Yellow Crane Tower is regarded as the symbol of Wuhan city",
        "Three years later, the coffin was still full of Jello.",
        "The fish dreamed of escaping the fishbowl and into the toilet where he saw his friend go.",
        "The person box was packed with jelly many dozens of months later.",
        "Standing on one's head at job interviews forms a lasting impression.",
        "It took him a month to finish the meal.",
        "He found a leprechaun in his walnut shell.",
        "Yellow Crane Tower, located on Snake Hill in Wuchang, is one of the Three Famous Towers South of Yangtze River (the other two: Yueyang Tower in Hunan and Tengwang Tower in Jiangxi)."
    ]

    # Batch-tokenize all sentences in one call; the tokenizer pads and
    # truncates each row to max_length, replacing the original manual
    # encode_plus loop + torch.stack with equivalent batched tensors.
    tokens = tokenizer(sentences, max_length=128, truncation=True,
                       padding='max_length', return_tensors='pt')

    # Inference only: no_grad avoids building the autograd graph, which
    # otherwise wastes memory on every pass of the caller's endless loop.
    with torch.no_grad():
        outputs = model(**tokens)

    logging.info('start dist')
    # Token-level dense vectors, shape (batch, seq_len, hidden).
    embeddings = outputs.last_hidden_state

    # Mean pooling over real tokens only: broadcast the attention mask to
    # the embedding shape so padding positions contribute zero.
    attention_mask = tokens['attention_mask']
    mask = attention_mask.unsqueeze(-1).expand(embeddings.size()).float()
    masked_embeddings = embeddings * mask

    summed = torch.sum(masked_embeddings, 1)
    # clamp guards against division by zero for an all-padding row
    summed_mask = torch.clamp(mask.sum(1), min=1e-9)
    mean_pooled = summed / summed_mask

    # Local import keeps sklearn off the module import path.
    from sklearn.metrics.pairwise import cosine_similarity

    # Convert from PyTorch tensor to numpy array for sklearn.
    mean_pooled = mean_pooled.detach().numpy()

    # Similarity of sentence 0 against every other sentence (1 x 8 row).
    loss = cosine_similarity(
        [mean_pooled[0]],
        mean_pooled[1:]
    )
    logging.info('end dist')

    print(loss)


if __name__ == '__main__':
    # Timestamped log lines with millisecond precision so consecutive
    # start/end markers can be used to time each phase.
    logging.basicConfig(
        format='%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s',
        level=logging.DEBUG,
        datefmt='%Y-%m-%d %H:%M:%S',
    )

    # Endless stress loop: each iteration is bracketed by log markers,
    # presumably for benchmarking via the timestamps — runs until killed.
    while True:
        logging.info('start compute')
        compute()
        logging.info('end compute')