import os

# NOTE: use the Hugging Face China mirror; without it, pulling models fails
# with: TimeoutError: timed out
# NOTE: this environment variable must be set BEFORE any Hugging Face related
# import runs, otherwise the libraries capture the default endpoint.
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'

from sentence_transformers import SentenceTransformer, util
from langchain_community.embeddings.sentence_transformer import SentenceTransformerEmbeddings
import warnings
from app.config import config

# Local directory where downloaded model weights are cached between runs.
CACHE_FOLDER = config.get("folder", "model_cache_folder")


# Module-level cache: constructing a SentenceTransformer is expensive
# (model weights loaded from disk, possibly downloaded), so load it once
# lazily and reuse the instance across calls.
_MINILM_MODEL = None


async def embed(sentences: list):
    """Encode *sentences* into dense vectors.

    Uses the pretrained 'all-MiniLM-L6-v2' model — a small but efficient
    Sentence Transformer. Its embedding dimension is fixed at 384; other
    models may differ.

    :param sentences: list of strings to embed
    :return: array of embeddings, one row per input sentence
    """
    global _MINILM_MODEL
    if _MINILM_MODEL is None:
        # First call only: load (and cache to CACHE_FOLDER) the model.
        _MINILM_MODEL = SentenceTransformer('all-MiniLM-L6-v2', cache_folder=CACHE_FOLDER)
    return _MINILM_MODEL.encode(sentences)


async def cos_sim(embeddingsA, embeddingsB):
    """Return the cosine-similarity matrix between two embedding sets.

    Thin wrapper around ``sentence_transformers.util.cos_sim``.
    """
    return util.cos_sim(embeddingsA, embeddingsB)


# Module-level cache for the mpnet embeddings wrapper: the model is large
# and slow to load, so build the wrapper once and reuse it.
_MPNET_EMBEDDINGS = None


def getMpnetModel():
    """Return a (cached) LangChain embeddings wrapper for all-mpnet-base-v2.

    The model weights are downloaded once into CACHE_FOLDER and the wrapper
    object itself is memoized, so repeated calls are cheap.

    :return: a SentenceTransformerEmbeddings instance running on CPU with
             normalized embeddings (required for cosine-similarity use).
    """
    global _MPNET_EMBEDDINGS
    if _MPNET_EMBEDDINGS is not None:
        return _MPNET_EMBEDDINGS
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")  # silence noisy load-time warnings
        # BUG FIX: the wrapper previously omitted cache_folder, so it ignored
        # the pre-downloaded weights and used the default HF cache instead
        # (a separate throwaway SentenceTransformer(...) call tried to warm
        # the cache but was ineffective and has been removed).
        _MPNET_EMBEDDINGS = SentenceTransformerEmbeddings(
            model_name="sentence-transformers/all-mpnet-base-v2",  # any ST model works here
            cache_folder=CACHE_FOLDER,  # reuse the shared on-disk model cache
            model_kwargs={"device": "cpu"},  # optional: pin execution device
            encode_kwargs={
                "normalize_embeddings": True,  # normalization needed (e.g. BGE-style usage)
                "batch_size": 32,  # tune batch size as needed
            },
        )
    return _MPNET_EMBEDDINGS
