from transformers import GPT2Tokenizer, GPT2Model

from transformers import AutoTokenizer, AutoModel
from sklearn.metrics.pairwise import cosine_similarity

import torch


def main_func(sentence1, sentence2, model_name="hfl/chinese-roberta-wwm-ext"):
    """Compute the cosine similarity between two sentences' embeddings.

    Encodes each sentence with a pretrained transformer and compares the
    [CLS]-token embeddings.

    Args:
        sentence1: First sentence (str).
        sentence2: Second sentence (str).
        model_name: Hugging Face model identifier; defaults to a Chinese
            RoBERTa-wwm checkpoint.

    Returns:
        float: cosine similarity between the two sentence embeddings.
    """
    # NOTE: loading the model on every call is expensive; hoist this out
    # if called repeatedly.
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModel.from_pretrained(model_name)
    inputs1 = tokenizer(sentence1, return_tensors="pt", padding=True, truncation=True)
    inputs2 = tokenizer(sentence2, return_tensors="pt", padding=True, truncation=True)

    with torch.no_grad():
        # [CLS]-token embedding (position 0) as the sentence representation.
        embeddings1 = model(**inputs1).last_hidden_state[:, 0, :]
        embeddings2 = model(**inputs2).last_hidden_state[:, 0, :]

        # Alternative: mean-pool over all token embeddings instead.
        # embeddings1 = model(**inputs1).last_hidden_state.mean(dim=1)
        # embeddings2 = model(**inputs2).last_hidden_state.mean(dim=1)

    # Convert to numpy explicitly — sklearn expects array-like, and this
    # avoids relying on implicit torch→numpy coercion.
    cosine_sim = cosine_similarity(embeddings1.numpy(), embeddings2.numpy())

    sim = float(cosine_sim[0][0])
    print(f"Cosine Similarity: {sim}")
    # Bug fix: originally the function printed and returned None (trailing
    # `pass`); now it returns the similarity so callers can use the value.
    return sim


if __name__ == '__main__':
    # Demo: compare two short Chinese sentences.
    main_func("中文测试", "英文测试")
