import torch
from transformers import BertTokenizer
from sentence_transformers import SentenceTransformer
import numpy as np

class GetEmbedding:
    """Wraps a local SentenceTransformer model to produce text embeddings."""

    def __init__(self, ROBERTA='./roberta-wwm-finetune'):
        """Load the sentence-embedding model onto the best available device.

        Args:
            ROBERTA: path to a local SentenceTransformer model directory
                (default: a RoBERTa-wwm fine-tuned checkpoint next to this file).
        """
        # Prefer GPU when available; SentenceTransformer accepts a torch.device.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = SentenceTransformer(ROBERTA, device=self.device)
        # NOTE(review): max_len is never read anywhere in this file — kept only
        # for backward compatibility with external callers; confirm before removing.
        self.max_len = 32

    def get_sim_embedding(self, text):
        """Return the embedding for a single text.

        Args:
            text: input string to embed.

        Returns:
            numpy.ndarray of shape (1, dim) — the row-normalized embedding
            produced by get_embeddings().
        """
        return get_embeddings(self.model, [text], self.device)
def normalize(x, axis=-1):
    """Scale x to unit L2 length along the specified dimension.

    Args:
        x: torch.Tensor (floating point).
        axis: dimension along which to normalize (default: last).

    Returns:
        torch.Tensor of the same shape as x, with (near-)unit L2 norm
        along `axis`.
    """
    # The 2 is the order of the norm (Euclidean). A small epsilon keeps the
    # division well-defined for zero vectors.
    return x / (torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12)
def get_embeddings(model, source, device):
    """Encode `source` texts and return their L2-normalized embeddings.

    Args:
        model: a SentenceTransformer-like object exposing ``eval()`` and
            ``encode(texts, convert_to_tensor=True)``.
        source: list of input strings to embed.
        device: kept for backward compatibility with existing callers;
            ``encode`` already returns tensors on the model's own device,
            so this argument is no longer read here.

    Returns:
        numpy.ndarray of shape (len(source), dim), one row-normalized
        embedding per input text.
    """
    # Disable dropout/batch-norm updates for deterministic inference.
    model.eval()
    with torch.no_grad():
        embeddings = model.encode(source, convert_to_tensor=True)
        # Row-normalize so downstream dot products equal cosine similarity.
        embeddings = normalize(embeddings)
        return embeddings.cpu().numpy()
