from transformers import AutoTokenizer, AutoModel
import torch
import os
import sys
# Make the parent directory importable so `my_common` below resolves
# when this file is run directly as a script.
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
from my_common import load_auto_model, load_auto_tokenizer, load_flag_model

# Pooling: reduce token-level hidden states to one vector per sentence.
def pooling(last_hidden_state: torch.Tensor, pooling_method='cls', attention_mask: torch.Tensor = None):
    """Pool (batch, seq_len, hidden) hidden states into (batch, hidden) embeddings.

    Args:
        last_hidden_state: Model output of shape (batch, seq_len, hidden).
        pooling_method: 'cls' takes the first token's vector; 'mean'
            averages token vectors weighted by the attention mask.
        attention_mask: (batch, seq_len) 0/1 mask; required for 'mean'.

    Returns:
        Tensor of shape (batch, hidden).

    Raises:
        ValueError: on an unknown pooling_method, or when 'mean' is
            requested without an attention_mask.
    """
    if pooling_method == 'cls':
        # BERT-style encoders put the [CLS] token at position 0.
        return last_hidden_state[:, 0]
    if pooling_method == 'mean':
        if attention_mask is None:
            raise ValueError("attention_mask is required for mean pooling")
        mask = attention_mask.unsqueeze(-1).float()
        summed = torch.sum(last_hidden_state * mask, dim=1)
        # clamp guards against an all-zero mask (division by zero)
        counts = attention_mask.sum(dim=1, keepdim=True).float().clamp(min=1e-9)
        return summed / counts
    # Original silently returned None here; fail loudly instead.
    raise ValueError(f"unknown pooling_method: {pooling_method!r}")


# Encode sentences into L2-normalized embedding vectors.
def _encode(sentences, max_length=512, convert_to_numpy=True, pooling_method='cls'):
    """Tokenize, run the encoder, pool, and L2-normalize.

    Relies on module-level `tokenizer` and `model` being loaded
    (set in the __main__ block).

    Args:
        sentences: A single string or a list of strings.
        max_length: Truncation length passed to the tokenizer.
        convert_to_numpy: Return a numpy array instead of a torch tensor.
        pooling_method: 'cls' (default) or 'mean'; forwarded to pooling().

    Returns:
        One embedding vector for a string input, else a (batch, hidden)
        array/tensor of embeddings.
    """
    # Accept a single sentence as well as a list of sentences.
    input_was_string = False
    if isinstance(sentences, str):
        sentences = [sentences]
        input_was_string = True

    inputs = tokenizer(
        sentences, 
        padding=True, 
        truncation=True, 
        return_tensors='pt', 
        max_length=max_length
    )

    # Pure inference: no_grad avoids building the autograd graph.
    with torch.no_grad():
        last_hidden_state = model(**inputs, return_dict=True).last_hidden_state

    embeddings = pooling(
        last_hidden_state, 
        pooling_method=pooling_method, 
        attention_mask=inputs['attention_mask']
    )

    # L2-normalize so dot products equal cosine similarity.
    embeddings = torch.nn.functional.normalize(embeddings, dim=-1)

    # convert to numpy if needed
    if convert_to_numpy:
        embeddings = embeddings.detach().numpy()

    return embeddings[0] if input_was_string else embeddings


if __name__ == '__main__':
    # Load the encoder and tokenizer via the shared helpers and switch
    # to evaluation mode (disables dropout etc.) before any forward pass.
    model = load_auto_model()
    tokenizer = load_auto_tokenizer()
    model.eval()

    sentences = ["embedding", "I love machine learning and nlp"]

    # Step-by-step demo: tokenize, forward pass, pool.
    inputs = tokenizer(
        sentences,
        max_length=512,
        padding=True,
        truncation=True,
        return_tensors='pt'
    )
    print(inputs)

    last_hidden_state = model(**inputs, return_dict=True).last_hidden_state
    print(last_hidden_state.shape)

    embeddings = pooling(
        last_hidden_state,
        attention_mask=inputs['attention_mask'],
        pooling_method='cls'
    )
    print(embeddings.shape)

    # Same pipeline through the convenience wrapper.
    embeddings = _encode(sentences)
    print(f"Embeddings:\n{embeddings}")

    # Pairwise similarity (vectors come back L2-normalized).
    scores = embeddings @ embeddings.T
    print(f"Similarity scores:\n{scores}")

    # Reference run: FlagModel's own encode, for comparison.
    flag_model = load_flag_model()
    embeddings = flag_model.encode(sentences)
    print(f"model Embeddings:\n{embeddings}")
    scores = embeddings @ embeddings.T
    print(f"model Similarity scores:\n{scores}")