
from langchain_community.embeddings import HuggingFaceBgeEmbeddings, OpenAIEmbeddings

# Model selection: local BGE embedding model files.
# Raw string avoids the invalid "\m" escape sequence the original
# mixed-backslash literal produced (SyntaxWarning on Python 3.12+);
# the resulting path value is byte-identical.
bge_model_path = r"D:\model_code\model\bge-large-zh-v1.5"
model_kwargs = {"device": "cpu"}  # run inference on CPU
encode_kwargs = {"normalize_embeddings": True}  # L2-normalize output vectors
bge_model_name = 'bge-large-zh-v1.5'  # model name as deployed on the server


# OpenAI-compatible endpoint of the local Xinference server.
base_url = 'http://localhost:5544/v1'

def get_openai_embeddings_local():
    """Build a HuggingFace BGE embeddings client backed by the local model files.

    Returns:
        A ``HuggingFaceBgeEmbeddings`` instance configured from the
        module-level path and kwargs constants.
    """
    return HuggingFaceBgeEmbeddings(
        model_name=bge_model_path,
        model_kwargs=model_kwargs,
        encode_kwargs=encode_kwargs,
    )


def get_openai_embeddings_xin():
    """Create an OpenAI-compatible embeddings client pointed at Xinference.

    Returns:
        An ``OpenAIEmbeddings`` instance targeting the local Xinference
        service endpoint.
    """
    # Xinference exposes an OpenAI-compatible API; the model name must
    # match the deployed model. No authentication is required, so any
    # non-empty api_key string is accepted.
    client = OpenAIEmbeddings(
        base_url=base_url,
        model=bge_model_name,
        api_key="EMPTY",
    )
    return client


def get_xinference_embeddings_xin():
    """Return a ``XinferenceEmbeddings`` client for the deployed BGE model.

    Returns:
        A ``XinferenceEmbeddings`` object used to talk to the Xinference
        embedding model.
    """
    from langchain_community.embeddings.xinference import XinferenceEmbeddings

    # Xinference's native API lives at the server root, not under the
    # OpenAI-compatible "/v1" prefix, so strip it from the shared base URL.
    server_url = base_url.replace("/v1", "")
    return XinferenceEmbeddings(server_url=server_url, model_uid=bge_model_name)
