# pip install llama-index-embeddings-huggingface
# pip install llama-index-embeddings-instructor
# pip install llama-index-embeddings-ollama
# pip install modelscope -U  # 添加ModelScope依赖

import os

from modelscope import snapshot_download  # 导入魔塔下载工具
from pymilvus import model

# Resolve the shared embedding cache directory: <this file's dir>/../embed_cache
current_dir = os.path.abspath(os.path.dirname(__file__))
relative_path = os.path.join(current_dir, "..", "embed_cache")
cache_folder = os.path.abspath(relative_path)

# Point ModelScope's download cache at the same directory, so model
# snapshots land next to the other embedding caches instead of ~/.cache.
os.environ['MODELSCOPE_CACHE'] = cache_folder  # key step: redirect ModelScope's cache path

print("cache_folder:", cache_folder)

# 模型名称映射（魔塔上的模型路径）
# Map short aliases to ModelScope model IDs.
# NOTE: ModelScope/HF model IDs for the BGE family use hyphens
# (e.g. "BAAI/bge-small-zh-v1.5"); the previous underscore forms
# ("BAAI/bge_small_zh_v1.5", "BAAI/bge_m3_embedding") do not exist on the
# hub and made snapshot_download fail for the "small" and "m3" aliases.
MODEL_MAP = {
    "large": "BAAI/bge-large-zh-v1.5",  # large Chinese embedding model
    "small": "BAAI/bge-small-zh-v1.5",  # small Chinese embedding model
    "m3": "BAAI/bge-m3",  # M3 multilingual embedding model
}


def download_model(model_type="large"):
    """Download an embedding model from ModelScope and return its local path.

    Args:
        model_type: Alias into ``MODEL_MAP`` ("large", "small", or "m3").

    Returns:
        str: Local filesystem path of the downloaded model snapshot.

    Raises:
        KeyError: If ``model_type`` is not a known alias. The message lists
            the valid aliases instead of echoing only the bad key.
    """
    try:
        model_name = MODEL_MAP[model_type]
    except KeyError:
        # Same exception type as the original bare dict lookup (backward
        # compatible for callers), but with an actionable message.
        raise KeyError(
            f"Unknown model_type {model_type!r}; expected one of {sorted(MODEL_MAP)}"
        ) from None
    model_path = snapshot_download(
        model_name,
        cache_dir=cache_folder,
        revision='master'  # a specific tag such as v1.0.0 may be pinned here
    )
    return model_path


def get_embedding_function(model_type="large", **kwargs):
    """Build an embedding function backed by a locally cached model.

    Fetches the model snapshot from ModelScope first (a no-op when it is
    already cached), then wraps it in a pymilvus SentenceTransformer
    embedding function running on CPU. Extra keyword arguments are passed
    through to the embedding-function constructor.
    """
    # Ensure the snapshot exists on disk and get its absolute path.
    local_path = download_model(model_type)

    return model.dense.SentenceTransformerEmbeddingFunction(
        model_name=local_path,  # key step: point at the local snapshot, not the hub name
        device="cpu",
        cache_folder=cache_folder,
        **kwargs,
    )


# Usage example -----------------------------------------------------------
if __name__ == "__main__":
    # Build the embedding function (downloads the model on first run).
    embedder = get_embedding_function("large")

    # Smoke-test the pipeline with a single short input.
    vectors = embedder(["测试文本"])
    print(f"Embedding dimension: {len(vectors[0])}")