from typing import Optional, List
import chromadb
import logging
import llm
from sentence_transformers import SentenceTransformer
import os
from huggingface_hub import snapshot_download

logging.basicConfig(
    level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s"
)
logger = logging.getLogger(__name__)
# Default ChromaDB collection name used when callers don't supply one.
DEFAULT_COLLECTION_NAME = "rag_system_collection"
# Default number of chunks returned by a similarity search.
DEFAULT_N_RESULTS = 2
# Default sentence-transformers embedding model id.
DEFAULT_MODEL_NAME = "all-MiniLM-L6-v2"

# Lazily-initialized module-level singleton for the embedding model.
_model: Optional[SentenceTransformer] = None
# Lazily-initialized module-level singleton for the ChromaDB client.
_client: Optional[chromadb.PersistentClient] = None
# Cached collection handle (name contains a typo; kept as-is for compatibility).
_collectioin: Optional[chromadb.Collection] = None

# Default on-disk path for the persistent ChromaDB database.
DEFAULT_DB_PATH = "./chroma_db"


def _get_model():
    """Return the global embedding model, loading it lazily.

    Load order:
      1. local cache directory, if it already contains files;
      2. download from the HF_ENDPOINT mirror into the local cache;
      3. fall back to loading by model name (requires network access).

    Returns:
        SentenceTransformer: the shared embedding model instance.

    Raises:
        Exception: re-raised when every loading strategy fails.
    """
    global _model
    if _model is None:
        model_id = DEFAULT_MODEL_NAME
        # Local cache directory; HF_HOME overrides the cache root.
        local_dir = os.path.join(
            os.environ.get("HF_HOME", "./.hf_cache"), model_id.replace("/", "_")
        )
        try:
            # Cache hit: load straight from disk, no network needed.
            if os.path.isdir(local_dir) and os.listdir(local_dir):
                logger.info("从本地缓存加载模型: %s", local_dir)
                _model = SentenceTransformer(local_dir)
            else:
                # Try downloading from the configured endpoint into the cache.
                try:
                    logger.info(
                        "尝试从镜像下载模型到本地缓存: %s -> %s", model_id, local_dir
                    )
                    # NOTE: resume_download was deprecated in huggingface_hub
                    # 0.22 (downloads always resume now), so it is omitted.
                    snapshot_download(
                        repo_id=f"sentence-transformers/{model_id}",
                        repo_type="model",
                        local_dir=local_dir,
                        endpoint=os.environ.get("HF_ENDPOINT"),
                    )
                    _model = SentenceTransformer(local_dir)
                except Exception as e:
                    logger.warning(
                        "镜像下载失败或不可用: %s. 尝试直接按名称加载（需网络）", e
                    )
                    # Last resort: load by name (goes online).
                    _model = SentenceTransformer(model_id)
        except Exception as e:
            logger.error("加载嵌入模型失败: %s", e)
            raise
    return _model


def _get_client():
    """Return the process-wide ChromaDB client, creating it on first use."""
    global _client
    if _client is not None:
        return _client
    _client = chromadb.PersistentClient(path=DEFAULT_DB_PATH)
    return _client


def get_query_embedding(query: str) -> List[float]:
    """Encode *query* into a dense vector using the shared embedding model."""
    vectors = _get_model().encode([query])
    return vectors[0].tolist()


def _get_collection(collection_name: str = DEFAULT_COLLECTION_NAME):
    """Return a cached ChromaDB collection handle for *collection_name*.

    The cache is keyed on the collection's name: requesting a different
    collection invalidates the cached handle and fetches the right one
    (the previous version cached the first collection forever and silently
    ignored later calls with a different name).

    Returns:
        chromadb.Collection: the requested collection, created if missing.
    """
    global _collectioin
    # Re-fetch when nothing is cached yet or a different collection is wanted.
    if _collectioin is None or _collectioin.name != collection_name:
        client = _get_client()
        _collectioin = client.get_or_create_collection(collection_name)
    return _collectioin


def retrieve_related_chunks(
    query_embedding: List[float],
    n_results: int = DEFAULT_N_RESULTS,
    collection_name: str = DEFAULT_COLLECTION_NAME,
) -> List[str]:
    """Run a similarity search and return the best-matching document chunks.

    Args:
        query_embedding: dense vector of the user's query.
        n_results: number of nearest chunks to return.
        collection_name: ChromaDB collection to search in.

    Returns:
        The list of matched document strings for the (single) query.

    Raises:
        ValueError: when the search returns no documents.
        Exception: any underlying ChromaDB error, re-raised after logging.
    """
    try:
        collection = _get_collection(collection_name)
        # Similarity search in the chosen collection for the top n_results hits.
        results = collection.query(
            query_embeddings=[query_embedding], n_results=n_results
        )
        related_chunks = results.get("documents")
        if not related_chunks or not related_chunks[0]:
            raise ValueError("未检索到相关的内容")
        # query() nests results per input embedding; we sent one, so take [0].
        return related_chunks[0]
    except Exception as e:
        # Lazy %-formatting: the message is built only if actually emitted.
        logger.error("向量检索失败:%s", e)
        raise


def query_rag(
    query: str,
    n_results: int = DEFAULT_N_RESULTS,
    collection_name: str = DEFAULT_COLLECTION_NAME,
):
    """Answer a question via retrieval-augmented generation.

    Pipeline: embed the query, retrieve the most similar chunks from the
    vector store, then ask the LLM to answer using those chunks as context.

    Args:
        query: the user's question.
        n_results: number of chunks to retrieve (default DEFAULT_N_RESULTS = 2).
        collection_name: ChromaDB collection to search.

    Returns:
        The LLM's answer (whatever ``llm.invoke`` returns).

    Raises:
        ValueError: propagated from retrieval when nothing matches.
    """
    # 1. Embed the query text.
    query_embedding = get_query_embedding(query)
    # 2. Vector search for the most relevant chunks.
    related_chunks = retrieve_related_chunks(
        query_embedding, n_results=n_results, collection_name=collection_name
    )
    # Debug output goes through the logger so it respects the logging config
    # (the previous bare print() calls bypassed it and polluted stdout).
    logger.debug("related_chunks: %s", related_chunks)
    # 3. Join the chunks into a context block and build the prompt.
    context = "\n".join(related_chunks)
    prompt = f"""
    已知信息：
    {context}
    请根据上述的内容回答用户的问题：{query}
    """
    logger.debug("prompt: %s", prompt)
    answer = llm.invoke(prompt)
    return answer
