# -*- coding: utf-8 -*-
import json
import uuid
import traceback
from typing import List, Tuple, Dict, Optional

from loguru import logger
from langchain_core.documents import Document
from pydantic import BaseModel
from fastapi import Request
from sse_starlette import EventSourceResponse

from configs.kb_config import ES_KB_INDEX_NAME, ES_KB_EMERGENCY_NAME
from schemas.response_entity import JsonModel
from db.repository.knowledge import db_get_files_in_kb
from db.repository.conversation import db_add_conversation, db_get_conversations, db_save_dialog, db_search_dialog, \
    db_update_conversion_answer
from utils.rewrite_query import rewrite_query
from service.es_service import ELASTICSEARCH, ELASTICSEARCH_EMERGENCY, ik_match_search
from service.llm_service.deepseek_chat import deepseek_model

from .utils import remove_attach_id, replace_attach_id, response_source_text, prepare_prompt, invoke_glm


async def chat_iter(query: str,
                    dialog_id: str,
                    references: List[Tuple[Document, float]],
                    histories: List[Dict],
                    origin_query: str,
                    token: str
                    ):
    """
    Stream a chat answer as server-sent-event frames.

    Steps:
      1. Filter and format the retrieved references, persist the conversation.
      2. Yield the reference payload first so the client can render sources.
      3. Stream model chunks as they arrive, accumulating the full answer.
      4. Persist the final answer and yield it as a closing JSON frame.

    Args:
        query: The (possibly rewritten) query sent to the model.
        dialog_id: Identifier of the dialog this turn belongs to.
        references: Retrieved documents paired with their relevance scores.
        histories: Prior conversation messages (currently unused here;
            kept for interface compatibility with callers).
        origin_query: The user's original, unrewritten question.
        token: Auth token used when resolving attachment placeholders.

    Yields:
        JSON strings (reference frame, answer chunks, final answer frame),
        or an SSE error event dict on failure.
    """
    try:
        references_json_returned = []
        for reference, score in references:
            # Skip fragments too short to be a meaningful source.
            if len(reference.page_content) < 15:
                continue
            # Replace attachment placeholders with renderable content.
            processed_content = replace_attach_id(reference, token)

            references_json_returned.append({
                "file_id": reference.metadata.get("file_id"),
                "file_name": reference.metadata.get("file_name"),
                "score": str(score),
                "content": processed_content
            })

        conversation = db_add_conversation(origin_query, dialog_id=dialog_id, references=references_json_returned)
        # First frame: sources plus ids, so the client can attribute the answer.
        yield json.dumps(
            {"reference": references_json_returned, "dialog_id": str(dialog_id), "conversion": str(conversation.id)},
            ensure_ascii=False)

        res = ''
        async for chunk in invoke_glm(query, references_json_returned):
            if chunk:
                res += chunk
                yield chunk

        # Persist the full answer before emitting the closing frame.
        db_update_conversion_answer(conversation_id=conversation.id, answer=res)
        yield json.dumps({"response": res}, ensure_ascii=False)

    except Exception:
        # Log the full traceback through the project logger (not stdout),
        # then emit an SSE error event the client can react to.
        logger.error(traceback.format_exc())
        yield {
            "event": "error",
            "data": json.dumps({
                "code": 500,
                "data": None,
                "msg": "回答生成失败"
            }, ensure_ascii=False)
        }


class ChatEntity(BaseModel):
    """Request payload for the keyword chat endpoint."""

    # The user's question.
    query: str
    # Existing dialog to continue; None starts a new dialog.
    dialog_id: Optional[uuid.UUID | str] = None
    # Auth token forwarded when resolving attachment placeholders.
    token: str
    # Knowledge bases to search; the endpoint rejects None with a 404.
    # Optional is required here: the annotation must admit the None default.
    kb_ids: Optional[List[uuid.UUID]] = None


async def keyword_chat(request: Request, payload: ChatEntity):
    """
    Handle a keyword-based chat request and stream the answer via SSE.

    Flow: validate the knowledge-base selection, resolve or create the
    dialog, collect candidate file ids from the selected knowledge bases,
    run an IK match search against Elasticsearch, gather the conversation
    history, then hand off to ``chat_iter`` for streaming generation.

    Args:
        request: FastAPI request; ``request.state.user`` must be populated
            by upstream auth middleware.
        payload: Validated chat request body.

    Returns:
        An ``EventSourceResponse`` streaming the answer, or a ``JsonModel``
        error response on validation/processing failure.
    """
    # The knowledge bases are resolved to file ids first; the ES index is
    # then queried by file id, so documents need not carry a kb_id.
    try:
        user = request.state.user  # injected by auth middleware
        headers = None

        if payload.kb_ids is None:
            return JsonModel(code=404, data=None, msg="请选择知识库").to_response(headers=headers)

        user_id = user.id
        if payload.dialog_id:
            logger.debug(f"user_id: {user_id}")
            dialog = db_search_dialog(dialog_id=payload.dialog_id, user_id=user_id, dialog_type=1)
            if dialog is None:
                return JsonModel(code=404, data=None, msg="对话不存在,请更换对话").to_response(headers=headers)
        else:
            dialog = db_save_dialog(payload.query, user_id, dialog_type=1)

        # Every file id contained in the selected knowledge bases.
        file_ids = [str(f.id) for kb_id in payload.kb_ids for f in db_get_files_in_kb(kb_id)[0]]

        # Entity-name rewriting of the raw query before retrieval.
        new_query = rewrite_query(payload.query)

        # IK (Chinese analyzer) match search against the knowledge-base index.
        ik_reference_list: List[Document] = ik_match_search(
            ELASTICSEARCH, index_name=ES_KB_INDEX_NAME, target=new_query,
            file_ids=file_ids, top_k=20)
        logger.debug(f"ik_reference_list length: {len(ik_reference_list)}")

        # Reranking is currently disabled; pass results through with a
        # neutral score so chat_iter's (Document, score) contract holds.
        reference_rerank_list = [(doc, 0.0) for doc in ik_reference_list[:20]]

        # Conversation history, oldest first.
        conversions = list(db_get_conversations(dialog.id, user_id))
        conversions.reverse()
        histories = [message for conversion in conversions for message in conversion.to_messages()]

    except Exception:
        logger.error(traceback.format_exc())
        return JsonModel(code=500, data=None, msg="回答生成失败").to_response()

    return EventSourceResponse(
        chat_iter(query=new_query, dialog_id=dialog.id, histories=histories, references=reference_rerank_list,
                  origin_query=payload.query, token=payload.token)
    )
