import logging
from typing import Optional
import hashlib
import os

# Point Hugging Face downloads at a mirror and keep the model cache next to
# this file. These environment variables MUST be set before huggingface_hub /
# sentence_transformers are imported below — which is why those imports come
# after this block.
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
os.environ["HF_HOME"] = os.path.abspath(
    os.path.join(os.path.dirname(__file__), ".hf_cache")
)

import chromadb
from huggingface_hub import snapshot_download
from sentence_transformers import SentenceTransformer

logger = logging.getLogger(__name__)

# Default vector-store collection name
DEFAULT_COLLECTION_NAME = "rag_system_collection"
# Default sentence-embedding model name
DEFAULT_MODEL_NAME = "all-MiniLM-L6-v2"

# Default on-disk path for the ChromaDB database
DEFAULT_DB_PATH = "./chroma_db"
# Lazily-initialized global embedding model (created by get_global_embedding_model)
_model: Optional[SentenceTransformer] = None
# Lazily-initialized global ChromaDB client (created by get_global_chromadb_client)
_client: Optional[chromadb.PersistentClient] = None


def get_global_embedding_model():
    """
    Return the process-wide SentenceTransformer instance, loading it lazily.

    Resolution order:
      1. Load from the local cache directory if it already contains files.
      2. Otherwise download the model from HF_ENDPOINT into the cache.
      3. As a last resort, load by model name (requires direct network access).

    :return: the shared ``SentenceTransformer`` instance.
    :raises Exception: re-raises any model-loading failure after logging it.
    """
    global _model
    if _model is None:
        model_id = DEFAULT_MODEL_NAME
        # Local cache directory for this model ("/" is not safe in a dir name).
        local_dir = os.path.join(
            os.environ.get("HF_HOME", "./.hf_cache"), model_id.replace("/", "_")
        )
        try:
            # Prefer the local cache when it exists and is non-empty.
            if os.path.isdir(local_dir) and os.listdir(local_dir):
                logger.info("从本地缓存加载模型: %s", local_dir)
                _model = SentenceTransformer(local_dir)
            else:
                # Try downloading from the configured endpoint into the cache.
                try:
                    logger.info(
                        "尝试从镜像下载模型到本地缓存: %s -> %s", model_id, local_dir
                    )
                    # NOTE: the deprecated `resume_download` kwarg was dropped:
                    # huggingface_hub always resumes downloads now, and passing
                    # it triggers warnings / errors on recent versions.
                    snapshot_download(
                        repo_id=f"sentence-transformers/{model_id}",
                        repo_type="model",
                        local_dir=local_dir,
                        endpoint=os.environ.get("HF_ENDPOINT"),
                    )
                    _model = SentenceTransformer(local_dir)
                except Exception as e:
                    logger.warning(
                        "镜像下载失败或不可用: %s. 尝试直接按名称加载（需网络）", e
                    )
                    # Fallback: load by name (will go to the network).
                    _model = SentenceTransformer(model_id)
        except Exception as e:
            logger.error("加载嵌入模型失败: %s", e)
            raise
    return _model


def get_global_chromadb_client():
    """Return the process-wide ChromaDB client, creating it on first access."""
    global _client
    if _client is not None:
        return _client
    _client = chromadb.PersistentClient(path=DEFAULT_DB_PATH)
    return _client


def save_text_to_db(
    text: str,
    collection_name: str = DEFAULT_COLLECTION_NAME,
) -> str:
    """
    Save a text snippet and its embedding into the vector database.

    The MD5 hex digest of the text is used as a deterministic ID, so storing
    the same text twice is a no-op (MD5 is used for deduplication only, not
    for security).

    :param text: the text to store; blank/empty text is skipped.
    :param collection_name: name of the target vector-store collection.
    :return: the text's ID, or ``""`` when the text was empty and skipped.
    :raises Exception: re-raises any storage failure after logging it.
    """
    try:
        if not text or not text.strip():
            logger.warning("尝试保存的文本为空，跳过保存操作。")
            return ""
        # Shared embedding model and ChromaDB client.
        embedding_model = get_global_embedding_model()
        client = get_global_chromadb_client()
        collection = client.get_or_create_collection(name=collection_name)
        # Content-derived deterministic ID enables deduplication.
        text_id = hashlib.md5(text.encode("utf-8")).hexdigest()
        existing = collection.get(ids=[text_id])
        if existing and existing.get("ids"):
            logger.info(
                "文本已存在于集合 '%s' 中，ID: %s，跳过保存。",
                collection_name,
                text_id,
            )
            return text_id
        # encode([...]) returns one row per input; take row 0 and convert to a
        # plain list, which is what ChromaDB expects for embeddings.
        embedding = embedding_model.encode([text])[0].tolist()
        collection.add(
            documents=[text],
            embeddings=[embedding],
            ids=[text_id],
            # One metadata dict per ID: `metadatas` must align with `ids`,
            # like `documents` and `embeddings` above.
            metadatas=[{"source": "rag_system"}],
        )
        return text_id
    except Exception as e:
        logger.error("保存文本到向量数据库失败: %s", str(e))
        raise
