import hashlib
import os
from time import time

from fastapi import UploadFile

from app.common.result import Result
from app.common.where_op_symbol import WhereOpSymbol
from app.config.chroma import KnowledgeChromaClient
from app.config.logging import logger
from app.decorator.timeit import timeit
from app.llm.rerank import bge_rerank_model
from app.models.KnowledgeQueryCondition import KnowledgeQueryCondition
from app.models.corpus_result import CorpusResult
from app.reader import document_reader
from app.spliter.document_spliter import DocumentSpliter
from app.utils import file_utils
from app.utils.query_result_utils import flat_document
from app.utils.where_utils import to_and_filter


@timeit
def process(file: UploadFile) -> Result:
    """
    Document ingestion pipeline: save upload -> read -> split -> id -> upsert.

    :param file: uploaded file to ingest into the knowledge vector store
    :return: Result.ok() on success, Result.fail(...) when the file yields no content
    """
    # 0. Persist the upload to a local file so downstream readers can work from a path.
    file_dto = file_utils.save_file(file)
    try:
        # 1. Read the raw document content from the saved file.
        result = document_reader.reader_file(file_dto)
        if not result.success or result.data is None:
            return Result.fail(message="内容为空")
        # 2. Split the parsed documents into chunks suitable for embedding.
        spliter = DocumentSpliter(result.data)
        splitter_documents = spliter.split()
        logger.info(f"Split blog post into {len(splitter_documents)} sub-documents.")
        if not splitter_documents:
            # Nothing to index after splitting; treat the same as empty content
            # instead of calling upsert with three empty lists.
            return Result.fail(message="内容为空")

        # We write to chroma directly (not via langchain), so document ids must be
        # generated by hand. [20250726] Use the md5 of the chunk text as a stable,
        # content-derived id: identical chunks map to the same id, so upsert dedupes.
        for splitter_document in splitter_documents:
            document_id = hashlib.md5(splitter_document.page_content.encode()).hexdigest()
            splitter_document.id = document_id
            splitter_document.metadata['document_id'] = document_id

        # 3. Project the chunks into the content/metadata/ids triple chroma expects
        #    and upsert them into the vector store.
        contents = [doc.page_content for doc in splitter_documents]
        metadata = [doc.metadata for doc in splitter_documents]
        ids = [doc.id for doc in splitter_documents]
        KnowledgeChromaClient.upsert_documents(ids=ids, metadata=metadata, documents=contents)
    finally:
        # Always clean up the temp file; guard against it already being gone so the
        # cleanup never masks the real outcome with a FileNotFoundError.
        if file_dto.file_path and os.path.exists(file_dto.file_path):
            os.remove(file_dto.file_path)
    return Result.ok()


@timeit
def search(condition: KnowledgeQueryCondition) -> Result:
    """
    Document content retrieval: vector search -> rerank -> score filter.

    :param condition: search condition (query text plus optional metadata filter)
    :return: Result wrapping the reranked corpus entries with score above the cutoff
    """
    # 0. Convert the optional metadata filter into a chroma "where" clause
    #    (all fields AND-combined with equality).
    meta_dict = None if condition.meta is None else condition.meta.model_dump()
    where = to_and_filter(meta_dict, op=WhereOpSymbol.EQ)
    # 1. Vector search in chroma, using the embedding model configured on the collection.
    query_result = KnowledgeChromaClient.search_documents(query=condition.query, where=where, top_k=5)
    logger.info(f"input query: {condition.query}, Search query result:{query_result}")

    start_time = time()
    # Flatten the nested chroma query result into a plain list of documents.
    flat_documents = flat_document(query_result)
    # 2. Rerank with the BGE cross-encoder; each result carries the matched
    #    content, its original corpus index, and a relevance score.
    # https://www.sbert.net/docs/quickstart.html#cross-encoder
    rerank_documents = []
    if flat_documents:
        rerank_documents = bge_rerank_model.rank(query=condition.query, documents=flat_documents,
                                                 return_documents=True, top_k=3)

    logger.info(f"rerank elapsed time::{time() - start_time}")

    # 3. Map the raw rerank output into CorpusResult objects.
    corpus_result = CorpusResult.from_list(rerank_documents)
    # Keep only sufficiently relevant hits. NOTE(review): the original comment said
    # the cutoff should be 0.8 but the code uses 0.6 — confirm the intended threshold.
    final_result = [cos for cos in corpus_result if cos.score > 0.6]
    return Result.ok(final_result)
