from fastapi import Body, Request
from fastapi.responses import StreamingResponse
from configs.model_config import LLM_MODELS, TEMPERATURE, HISTORY_LEN, MAX_TOKENS, EMBEDDING_MODEL
from configs.kb_config import SCORE_THRESHOLD, VECTOR_SEARCH_TOP_K
from configs.other_config import INSTRUCTIONS, DEFAULT_CHARACTER, DEFAULT_NICKNAME, LOCATION_FILE_PATH, SECRET_KEY, ALGORITHM
from server.utils import BaseResponse, embedding_device
from server.chat.utils import summarize_chat
from configs.logging_config import configure_logging
from loguru import logger
import datetime
import os
from server.utils import create_chat_response_event
from server.chat.utils import generate_request_id, log_content
from sse_starlette.sse import EventSourceResponse
from server.chat.utils import llm_chat
from server.db.repository.message_repository import get_message_by_user_id, add_message_to_mars_chat_with_own_pet_db, add_message_to_mars_training_mode_kb_chat_db
from server.utils import decode_token
from typing import Optional
from langchain.vectorstores.faiss import FAISS
from server.knowledge_base.kb_cache.faiss_cache import _FaissPool
from server.knowledge_base.utils import get_vs_path


# from server.chat.knowledge_base_chat_ import knowledge_base_chat
from server.chat.knowledge_base_chat_long_memory import knowledge_base_chat

# Stdlib import hoisted to the top of this setup section instead of being
# buried between executable statements (PEP 8: imports before module code).
import json

# Knowledge base and prompt template used for every Mars chat request below.
knowledge_base_name = "Mars"
prompt_name = "knowledge_Mars"

configure_logging()

# Import-time side effect: load the location lookup table once.  If the file
# is missing or malformed the module fails to import, which surfaces the
# misconfiguration immediately at startup.
with open(LOCATION_FILE_PATH, "r", encoding="utf-8") as f:
    locations = json.load(f)


async def mars_long_memory_chat(uid: str = Body(..., description="用户ID"),
                                pet_query: Optional[str] = Body("", description="宠物输入", examples=["恼羞成怒"]),
                                query: Optional[str] = Body("", description="用户输入", examples=["恼羞成怒"]),
                                stream: bool = Body(False, description="流式输出"),
                                nickname: Optional[str] = Body(DEFAULT_NICKNAME, description="用户昵称"),
                                character: Optional[str] = Body(DEFAULT_CHARACTER, description="性格"),
                                whether_train: Optional[bool] = Body(False, description="是否训练"),
                                request: Request = None
                                ):
    """Mars long-memory chat endpoint.

    Dispatches an incoming request down one of three paths:

    1. **Training mode** (``whether_train`` is true and both ``pet_query`` and
       ``query`` are non-empty): summarize the pet-question/user-answer pair
       and persist it into the user's private FAISS vector store; returns a
       plain :class:`BaseResponse`.
    2. **Recognized instruction**: the intent recognizer maps ``query`` to an
       entry of ``INSTRUCTIONS``; a single instruction event is emitted (for
       instruction "5" a city/district pair is additionally extracted).
    3. **Default dialogue**: delegate to the long-memory knowledge-base chat
       and stream its answer back.

    Returns either a ``BaseResponse`` (validation errors, training result) or
    an ``EventSourceResponse``/``StreamingResponse`` depending on ``stream``.
    """
    # Unique ID used for request tracing; prefixed below per code path
    # ("Training_", "I_", "D_").
    request_id = generate_request_id()

    # Normalize optional inputs: clients may send JSON null, which would
    # otherwise crash ``.strip()`` / leak None into the DB layer.
    query = (query or "").strip()
    pet_query = pet_query or ""

    if pet_query == "" and query == "":
        return BaseResponse(code=400, msg="用户输入和宠物输入不能同时为空")
    if pet_query != "" and not whether_train:
        return BaseResponse(code=401, msg="不训练时，宠物不用输入")

    if whether_train and pet_query != "" and query != "":
        return await _train_with_pet_exchange(uid, request_id, pet_query, query)
    # NOTE(review): when whether_train is true but only one of pet_query/query
    # is non-empty, the request falls through to the normal chat path below —
    # confirm this is intended rather than returning a 4xx.

    # Fetch the user's recent chat history (best-effort: an empty history
    # still lets the chat proceed).
    try:
        histories = get_message_by_user_id(user_id=uid, history_len=HISTORY_LEN)
    except Exception as e:
        logger.error(f"Failed to get chat history: {e}")
        histories = []

    # Intent recognition; "-1" is the sentinel for "no instruction".
    data_content = None
    try:
        instruction = await llm_chat(query, "Intention_Recognition_Expert_Pet", 0.1)
    except Exception as e:
        logger.error(f"Failed to recognize the instruction: {e}")
        instruction = "-1"

    city = ""
    district = ""
    if instruction in INSTRUCTIONS:
        # Instruction events carry no free-text answer.
        answer = None
        request_id = "I_" + request_id
        if instruction == "5":
            # Instruction "5" needs a location: extract "city[,district]".
            try:
                info = await llm_chat(query, "Extract", 0.1)
                if info != "缺少信息":
                    parts = info.split(",")
                    if len(parts) == 1:
                        city = parts[0]
                        district = ""
                    elif len(parts) == 2:
                        city = parts[0]
                        district = parts[1]
                # The extractor may emit the literal string "None".
                if district == "None":
                    district = ""
            except Exception as e:
                # Extraction failed: fall back to a normal knowledge-base chat.
                logger.error(f"Failed to extract the information: {e}")
                return await _kb_chat_response(uid, query, stream, histories,
                                               request_id, request, nickname, character)

        event = create_chat_response_event(response_text=answer, instruction=instruction,
                                           request_id=request_id, finish=True,
                                           data_content=data_content, city=city, district=district)
        responses = EventSourceResponse(event) if stream else StreamingResponse(event)
        log_content(uid, query, answer, instruction=instruction, request_id=request_id,
                    city=city, district=district, chat_type="knowledge_base_chat")
        return responses

    # No instruction recognized: plain dialogue through the knowledge base.
    request_id = "D_" + request_id
    return await _kb_chat_response(uid, query, stream, histories,
                                   request_id, request, nickname, character)


async def _train_with_pet_exchange(uid: str, request_id: str, pet_query: str, query: str):
    """Persist one training exchange (pet question + user answer).

    Logs the pair to the training DB (best-effort), summarizes it via the LLM,
    and appends the summary to the user's personal FAISS vector store, creating
    the store on first use.  Returns a ``BaseResponse`` echoing what was stored.
    """
    try:
        request_id = "Training_" + request_id
        add_message_to_mars_training_mode_kb_chat_db(uid, request_id, chat_type="training_mode_chat", pet_input=pet_query, user_input=query, instruction="-1")
    except Exception as e:
        logger.error(f"保存用户和宠物的输入到db失败: {e}")

    faiss_pool = _FaissPool()
    user_kb_name = os.path.join("User", f"{uid}")
    user_vs_path = get_vs_path(knowledge_base_name=user_kb_name, vector_name=EMBEDDING_MODEL)
    if os.path.isfile(os.path.join(user_vs_path, "index.faiss")):
        # Existing per-user store: reload it with the configured embeddings.
        embeddings = faiss_pool.load_kb_embeddings(kb_name=user_kb_name, embed_device=embedding_device(),
                                                   default_embed_model=EMBEDDING_MODEL)
        user_vector_store = FAISS.load_local(user_vs_path, embeddings, normalize_L2=True)
    else:
        # First training exchange for this user: create an empty store.
        user_vector_store = faiss_pool.new_vector_store()
        user_vector_store.save_local(user_vs_path)

    # In training mode the user's input is the answer to the pet's question.
    summary = await summarize_chat(query=pet_query, temperature=TEMPERATURE, prompt_name="summarize", answer=query)
    user_vector_store.add_texts([json.dumps(summary)])
    user_vector_store.save_local(user_vs_path)
    logger.info(f"uid: {uid} , pet_input: {pet_query} , user_input: {query}, summary: {summary}")
    return BaseResponse(code=200, msg="训练成功", data={
        "uid": uid,
        "pet_input": pet_query,
        "user_input": query,
        "summary": summary
    })


async def _kb_chat_response(uid, query, stream, histories, request_id, request, nickname, character):
    """Run the long-memory knowledge-base chat and wrap its generator in the
    streaming response type matching the ``stream`` flag (SSE vs. plain)."""
    response = await knowledge_base_chat(uid=uid,
                                         query=query,
                                         stream=stream,
                                         history=histories,
                                         knowledge_base_name=knowledge_base_name,
                                         request_id=request_id,
                                         score_threshold=SCORE_THRESHOLD,
                                         top_k=VECTOR_SEARCH_TOP_K,
                                         model_name=LLM_MODELS[0],
                                         temperature=TEMPERATURE,
                                         max_tokens=MAX_TOKENS,
                                         request=request,
                                         prompt_name=prompt_name,
                                         nickname=nickname,
                                         character=character)
    return EventSourceResponse(response) if stream else StreamingResponse(response)
