# import torch
# from sentence_transformers import SentenceTransformer
# from transformers import AutoTokenizer
#
# # 必须通过 encode() 处理输入
# texts = "哈哈哈我是狗"
#
#
# class TextToEmbedding(torch.nn.Module):
#     def __init__(self, device="cpu"):
#         super().__init__()
#         self.device = torch.device(device)
#         self.model = SentenceTransformer('all-MiniLM-L6-v2').to(self.device)
#         self.tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/all-MiniLM-L6-v2')
#
#     def forward(self, texts: str):
#         # 1. 确保tokenizer输出与模型同设备
#         inputs = self.tokenizer(
#             texts,
#             return_tensors="pt",
#             padding="max_length",
#             truncation=True,
#             max_length=128
#         ).to(self.device)  # 关键修复：统一设备
#
#         # 2. 模型推理
#         outputs = self.model(inputs)["sentence_embedding"]
#         return outputs
#
# mo = TextToEmbedding()
#
# print(mo(texts)[:20])
import torch
# import json
#
# # 读取文件内容并拆分为单个字符
# with open("3500常用字.txt", "r", encoding="utf-8") as f:
#     content = f.read().strip()  # 读取整个文件内容并去除首尾空格
#     chars = list(content)      # 将字符串拆分为单个字
#
# chars = chars[1:]
# chars.append(' ') # //完整的字符表
#
# # 检查重复字符
# unique_chars = set(chars)
# if len(unique_chars) != len(chars):
#     duplicates = [char for char in unique_chars if chars.count(char) > 1]
#     print(f"警告：发现重复字符: {duplicates}")
#
#     # 自动去重处理（保留第一次出现的顺序）
#     seen = set()
#     chars = [char for char in chars if not (char in seen or seen.add(char))]
#     print("已自动去除重复字符")
#
# # 建立双向映射
# word_to_id = {word: idx + 1 for idx, word in enumerate(chars)}
# id_to_word = {idx + 1: word for idx, word in enumerate(chars)}
#
# # 存储word_to_id字典
# with open("word_to_id.json", "w", encoding="utf-8") as f:
#     json.dump(word_to_id, f, ensure_ascii=False, indent=4)
#
# # 存储id_to_word字典
# with open("id_to_word.json", "w", encoding="utf-8") as f:
#     json.dump(id_to_word, f, ensure_ascii=False, indent=4)

import math

from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity

# 加载模型
model = SentenceTransformer("BAAI/bge-small-zh-v1.5")

# 输入句子
sentences = ["蹲下来慢慢走", "往后退"]
embeddings = model.encode(sentences)

# 计算余弦相似度
similarity = cosine_similarity([embeddings[0]], [embeddings[1]])[0][0]
print(f"句子1: '{sentences[0]}'")
print(f"句子2: '{sentences[1]}'")
print(f"语义相似度: {similarity:.4f}")