from fastapi import Body, Request
from fastapi.responses import StreamingResponse
from configs.model_config import LLM_MODELS, TEMPERATURE, HISTORY_LEN, MAX_TOKENS, EMBEDDING_MODEL
from configs.kb_config import SCORE_THRESHOLD, VECTOR_SEARCH_TOP_K, GO_SERVICE_SECRET_KEY
from configs.other_config import INSTRUCTIONS, DEFAULT_CHARACTER, DEFAULT_NICKNAME, LOCATION_FILE_PATH, SECRET_KEY, \
    ALGORITHM, OCCUPATION_MAPPING
from server.utils import BaseResponse, embedding_device
from server.chat.utils import summarize_chat
from configs.logging_config import configure_logging
from loguru import logger
import datetime
import os
from server.utils import create_chat_response_event
from server.chat.utils import generate_request_id, log_content
from sse_starlette.sse import EventSourceResponse
from server.chat.utils import llm_chat
from server.db.repository.message_repository import add_message_to_mars_training_mode_kb_chat_db, get_message_by_user_id_chat_with_own_pet
from server.utils import decode_token, decode_verify_token
from typing import Optional
from langchain.vectorstores.faiss import FAISS
from server.knowledge_base.kb_cache.faiss_cache import _FaissPool
from server.knowledge_base.utils import get_vs_path
from jose import JWTError
from server.knowledge_base.kb_api import create_kb
from server.knowledge_base.utils import get_file_path
from server.knowledge_base.kb_doc_api import upload_docs_method
import hashlib
from server.db.repository.knowledge_file_repository import file_exists_in_db

# from server.chat.knowledge_base_chat_ import knowledge_base_chat
from server.chat.knowledge_base_chat_long_memory import knowledge_base_chat
from server.knowledge_base.kb_service.base import KBServiceFactory
from server.db.repository.knowledge_file_repository import get_file_name_hash_from_db, file_exists_in_db_
from server.chat.chat_fenxi import fenxi_chat
# Shared knowledge base / prompt used by the non-training chat flow below.
knowledge_base_name = "Mars"
prompt_name = "knowledge_Mars"

# Configure loguru sinks once at import time, before any handler runs.
configure_logging()
import json

# Location data loaded once at import time.
# NOTE(review): presumably city/district definitions used by instruction "5"
# handling -- confirm against the actual file contents; `locations` is not
# referenced in this chunk.
with open(LOCATION_FILE_PATH, "r", encoding="utf-8") as f:
    locations = json.load(f)

def create_file_name_hash(text):
    """Derive a stable on-disk file name for *text*: its MD5 hex digest."""
    return hashlib.md5(text.encode("utf-8")).hexdigest()

async def mars_long_memory_chat_token(uid: str = Body(..., description="用户ID"),
                                      pet_query: Optional[str] = Body("", description="宠物输入",
                                                                      examples=["恼羞成怒"]),
                                      query: Optional[str] = Body("", description="用户输入", examples=["恼羞成怒"]),
                                      stream: bool = Body(False, description="流式输出"),
                                      nickname: Optional[str] = Body(DEFAULT_NICKNAME, description="用户昵称"),
                                      character: Optional[str] = Body(DEFAULT_CHARACTER, description="性格"),
                                      whether_train: Optional[bool] = Body(False, description="是否训练"),
                                      request: Request = None
                                      ):
    """Long-memory pet chat, authenticated via a JWT in the ``Authorization`` header.

    Routing logic:
      * training mode + only user ``query``       -> persist the text as a
        "love document" in the per-user FAISS knowledge base and return;
      * training mode + ``pet_query`` + ``query`` -> ask ``fenxi_chat`` whether
        the user's answer is relevant (``"1"``); when it is, summarize the
        exchange and store it, otherwise report relevance ``"2"``;
      * otherwise                                 -> run intention recognition;
        a recognized instruction yields a canned instruction event, anything
        else falls through to ``knowledge_base_chat``.

    Returns a ``BaseResponse`` for synchronous outcomes, or an
    ``EventSourceResponse`` / ``StreamingResponse`` for chat replies
    (SSE when ``stream`` is true).
    """
    # Unique id used to correlate this request across logs and db rows.
    request_id = generate_request_id()
    # --- authenticate: the token's subject must equal the requested uid ---
    token = request.headers.get("Authorization")
    if not token:
        return BaseResponse(code=401, msg="没有提供token")
    try:
        payload = decode_token(token, SECRET_KEY, ALGORITHM)
        logger.info(f"解析到的payload: {payload}")
        token_uid = payload.get("sub")
        if not token_uid:
            logger.error(f"token中缺少用户ID, token_uid: {token_uid}")
            return BaseResponse(code=401, msg="token中缺少用户ID")
        if token_uid != uid:
            logger.error(f"token uid: {token_uid}与请求uid: {uid}不匹配")
            return BaseResponse(code=401, msg="Token is invalid.")
    except JWTError as e:
        logger.error(f"Failed to decode token: {e}")
        return BaseResponse(code=401, msg="token解析失败")
    # --- validate inputs: at least one of query / pet_query is required ---
    query = query.strip()
    if pet_query == "" and query == "":
        return BaseResponse(code=400, msg="用户输入和宠物输入不能同时为空")
    if pet_query != "" and not whether_train:
        return BaseResponse(code=400, msg="不训练时，宠物不用输入")
    # Ensure the per-user knowledge base exists before any upload/search.
    # NOTE(review): assumes create_kb is a no-op when the kb already exists.
    create_kb(knowledge_base_name=uid, vector_store_type="faiss", embed_model=EMBEDDING_MODEL)
    # --- training mode, user text only: store it verbatim ---
    if whether_train and pet_query == "" and query != "":
        try:
            request_id = "Training_" + request_id
            add_message_to_mars_training_mode_kb_chat_db(uid, request_id, chat_type="training_mode_chat",
                                                         user_input=query, instruction="-1")
        except Exception as e:
            logger.error(f"保存用户的输入到db失败: {e}")
            # BUGFIX: `raise "<str>"` is a TypeError in Python 3; raise a real
            # exception and keep the original cause chained.
            raise RuntimeError(f"保存用户的输入到db失败: {e}") from e
        # Reuse the hash file name already registered in the db when present;
        # otherwise derive a fresh md5-based name from the text.
        file_name_hash = ""
        if file_exists_in_db_(kb_name=uid, filename=query + ".txt"):
            file_name_hash = get_file_name_hash_from_db(kb_name=uid, filename=query + ".txt")["file_name_hash"]
        if not file_name_hash:
            file_name_hash = create_file_name_hash(query) + ".txt"
        local_file_path = get_file_path(knowledge_base_name=uid, doc_name=file_name_hash)
        os.makedirs(os.path.dirname(local_file_path), exist_ok=True)
        with open(local_file_path, "w+", encoding="utf-8") as f:
            f.write(query)
        try:
            upload_docs_method(knowledge_base_name=uid, local_file_paths=[local_file_path], chunk_size=1000,
                               override=True, file_hash_names=[file_name_hash],
                               file_original_names=[query + ".txt"])
            logger.info(f"用户：{uid} 成功上传了爱心文档：{query}")
            return BaseResponse(code=200, msg="上传爱心文档成功")
        except Exception as e:
            logger.error(f"用户：{uid} 上传爱心文档失败：{query} 失败原因：{e}")
            return BaseResponse(code=500, msg="上传爱心文档失败", data={"error": str(e)})
    # --- training mode, pet question + user answer ---
    if whether_train and pet_query != "" and query != "":
        response = await fenxi_chat(pet_input=pet_query, user_input=query, prompt_name="fenxi_chat")
        result = response.dict()["data"]["answer"]
        if result == "1":  # the user actually answered the pet's question
            try:
                request_id = "Training_" + request_id
                add_message_to_mars_training_mode_kb_chat_db(uid, request_id, chat_type="training_mode_chat",
                                                             pet_input=pet_query, user_input=query, instruction="-1")
            except Exception as e:
                logger.error(f"保存用户和宠物的输入到db失败: {e}")
                # BUGFIX: raise a real exception instead of a plain string.
                raise RuntimeError(f"保存用户和宠物的输入到db失败: {e}") from e
            file_name_hash = ""
            if file_exists_in_db_(kb_name=uid, filename=pet_query + ".txt"):
                file_name_hash = get_file_name_hash_from_db(kb_name=uid, filename=pet_query + ".txt")["file_name_hash"]
            if not file_name_hash:
                file_name_hash = create_file_name_hash(pet_query) + ".txt"
            local_file_path = get_file_path(knowledge_base_name=uid, doc_name=file_name_hash)
            os.makedirs(os.path.dirname(local_file_path), exist_ok=True)
            # In training mode the user's input is the answer to the pet's question.
            summary = await summarize_chat(query=pet_query, temperature=TEMPERATURE, prompt_name="summarize", answer=query)
            with open(local_file_path, "w+", encoding="utf-8") as f:
                f.write(summary)
            upload_docs_method(knowledge_base_name=uid, local_file_paths=[local_file_path], chunk_size=1000,
                               override=True, file_hash_names=[file_name_hash],
                               file_original_names=[pet_query + ".txt"])
            logger.info(f"uid: {uid} , pet_input: {pet_query} , user_input: {query}, summary: {summary}")
        else:
            result = "2"  # "2" == not a direct answer; nothing is stored
            summary = ""
            logger.warning(f"uid: {uid} , pet_input: {pet_query} , user_input: {query} 用户没有正面回答宠物问题")
        return BaseResponse(code=200, msg="训练成功", data={
            "uid": uid,
            "pet_input": pet_query,
            "user_input": query,
            "summary": summary,
            "relevant": result
        })
    # --- regular chat: load recent history for this user (best-effort) ---
    try:
        histories = get_message_by_user_id_chat_with_own_pet(user_id=uid, history_len=HISTORY_LEN)
    except Exception as e:
        logger.error(f"Failed to get chat history: {e}")
        histories = []

    # --- intention recognition ---
    data_content = None
    try:
        instruction = await llm_chat(query, "Intention_Recognition_Expert_Pet", 0.1)
    except Exception as e:
        logger.error(f"Failed to recognize the instruction: {e}")
        instruction = "-1"  # sentinel: no instruction recognized
    city = ""
    district = ""
    if instruction in INSTRUCTIONS:
        answer = None
        request_id = "I_" + request_id
        if instruction == "5":
            # Instruction "5" needs a location ("city[,district]") extracted
            # from the query; extraction failure falls back to normal chat.
            try:
                info = await llm_chat(query, "Extract", 0.1)
                if info != "缺少信息":
                    info = info.split(",")
                    if len(info) == 1:
                        city = info[0]
                        district = ""
                    if len(info) == 2:
                        city = info[0]
                        district = info[1]
                if district == "None":
                    district = ""
            except Exception as e:
                logger.error(f"Failed to extract the information: {e}")
                response = await knowledge_base_chat(uid=uid,
                                                     query=query,
                                                     stream=stream,
                                                     history=histories,
                                                     knowledge_base_name=knowledge_base_name,
                                                     request_id=request_id,
                                                     score_threshold=SCORE_THRESHOLD,
                                                     top_k=VECTOR_SEARCH_TOP_K,
                                                     model_name=LLM_MODELS[0],
                                                     temperature=TEMPERATURE,
                                                     max_tokens=MAX_TOKENS,
                                                     request=request,
                                                     prompt_name=prompt_name,
                                                     nickname=nickname,
                                                     character=character)
                return EventSourceResponse(response) if stream else StreamingResponse(response)

        # Canned instruction reply (answer stays None; the event carries the
        # instruction id plus any extracted location).
        event = create_chat_response_event(response_text=answer, instruction=instruction, request_id=request_id,
                                           finish=True, data_content=data_content, city=city, district=district)
        responses = EventSourceResponse(event) if stream else StreamingResponse(event)
        log_content(uid, query, answer, instruction=instruction, request_id=request_id, city=city, district=district,
                    chat_type="knowledge_base_chat")
        return responses

    else:
        # --- no recognized instruction: plain knowledge-base chat ---
        request_id = "D_" + request_id
        response = await knowledge_base_chat(uid=uid,
                                             query=query,
                                             stream=stream,
                                             history=histories,
                                             knowledge_base_name=knowledge_base_name,
                                             request_id=request_id,
                                             score_threshold=SCORE_THRESHOLD,
                                             top_k=VECTOR_SEARCH_TOP_K,
                                             model_name=LLM_MODELS[0],
                                             temperature=TEMPERATURE,
                                             max_tokens=MAX_TOKENS,
                                             request=request,
                                             prompt_name=prompt_name,
                                             nickname=nickname,
                                             character=character)
        return EventSourceResponse(response) if stream else StreamingResponse(response)


async def mars_long_memory_chat_token_v2(uid: str = Body(..., description="用户ID"),
                                      pet_query: Optional[str] = Body("", description="宠物输入",
                                                                      examples=["恼羞成怒"]),
                                      query: Optional[str] = Body("", description="用户输入", examples=["恼羞成怒"]),
                                      stream: bool = Body(False, description="流式输出"),
                                      nickname: Optional[str] = Body(DEFAULT_NICKNAME, description="用户昵称"),
                                      character: Optional[str] = Body(DEFAULT_CHARACTER, description="性格"),
                                      occupation: Optional[str] = Body("", description="职业"),
                                      whether_train: Optional[bool] = Body(False, description="是否训练"),
                                      request: Request = None
                                      ):
    """v2 of the long-memory pet chat endpoint.

    Differences from v1:
      * the token is verified via ``decode_verify_token`` against the Go
        service's secret instead of manual sub/uid comparison;
      * an ``occupation`` parameter selects an additional occupation
        knowledge base via ``OCCUPATION_MAPPING`` and is forwarded to
        ``knowledge_base_chat``.

    Otherwise the routing is identical: training-mode uploads of "love
    documents", relevance-checked pet/user exchanges, and intention
    recognition with a knowledge-base chat fallback.
    """
    # Unique id used to correlate this request across logs and db rows.
    request_id = generate_request_id()
    # --- authenticate via the Go-service token verifier ---
    try:
        token = decode_verify_token(request, GO_SERVICE_SECRET_KEY, ALGORITHM)
    except Exception as e:
        # NOTE(review): assumes the raised exception is HTTPException-like with
        # a dict ``detail``; anything else would AttributeError here -- confirm
        # against decode_verify_token's contract.
        return BaseResponse(code=401, msg="Token is invalid", data={"error": str(e.detail.get('msg'))})
    # --- validate inputs: at least one of query / pet_query is required ---
    query = query.strip()
    if pet_query == "" and query == "":
        return BaseResponse(code=400, msg="用户输入和宠物输入不能同时为空")
    if pet_query != "" and not whether_train:
        return BaseResponse(code=400, msg="不训练时，宠物不用输入")
    # Ensure the per-user knowledge base exists before any upload/search.
    create_kb(knowledge_base_name=uid, vector_store_type="faiss", embed_model=EMBEDDING_MODEL)
    # --- training mode, user text only: store it verbatim ---
    if whether_train and pet_query == "" and query != "":
        try:
            request_id = "Training_" + request_id
            add_message_to_mars_training_mode_kb_chat_db(uid, request_id, chat_type="training_mode_chat",
                                                         user_input=query, instruction="-1")
        except Exception as e:
            logger.error(f"保存用户的输入到db失败: {e}")
            # BUGFIX: `raise "<str>"` is a TypeError in Python 3; raise a real
            # exception and keep the original cause chained.
            raise RuntimeError(f"保存用户的输入到db失败: {e}") from e
        # Reuse the hash file name already registered in the db when present;
        # otherwise derive a fresh md5-based name from the text.
        file_name_hash = ""
        if file_exists_in_db_(kb_name=uid, filename=query + ".txt"):
            file_name_hash = get_file_name_hash_from_db(kb_name=uid, filename=query + ".txt")["file_name_hash"]
        if not file_name_hash:
            file_name_hash = create_file_name_hash(query) + ".txt"
        local_file_path = get_file_path(knowledge_base_name=uid, doc_name=file_name_hash)
        os.makedirs(os.path.dirname(local_file_path), exist_ok=True)
        with open(local_file_path, "w+", encoding="utf-8") as f:
            f.write(query)
        try:
            upload_docs_method(knowledge_base_name=uid, local_file_paths=[local_file_path], chunk_size=1000,
                               override=True, file_hash_names=[file_name_hash], file_original_names=[query + ".txt"])
            logger.info(f"用户：{uid} 成功上传了爱心文档：{query}")
            return BaseResponse(code=200, msg="上传爱心文档成功")
        except Exception as e:
            logger.error(f"用户：{uid} 上传爱心文档失败：{query} 失败原因：{e}")
            return BaseResponse(code=500, msg="上传爱心文档失败", data={"error": str(e)})
    # --- training mode, pet question + user answer ---
    if whether_train and pet_query != "" and query != "":
        response = await fenxi_chat(pet_input=pet_query, user_input=query, prompt_name="fenxi_chat")
        result = response.dict()["data"]["answer"]
        if result == "1":  # the user actually answered the pet's question
            try:
                request_id = "Training_" + request_id
                add_message_to_mars_training_mode_kb_chat_db(uid, request_id, chat_type="training_mode_chat",
                                                             pet_input=pet_query, user_input=query, instruction="-1")
            except Exception as e:
                logger.error(f"保存用户和宠物的输入到db失败: {e}")
                # BUGFIX: raise a real exception instead of a plain string.
                raise RuntimeError(f"保存用户和宠物的输入到db失败: {e}") from e
            file_name_hash = ""
            if file_exists_in_db_(kb_name=uid, filename=pet_query + ".txt"):
                file_name_hash = get_file_name_hash_from_db(kb_name=uid, filename=pet_query + ".txt")["file_name_hash"]
            if not file_name_hash:
                file_name_hash = create_file_name_hash(pet_query) + ".txt"
            local_file_path = get_file_path(knowledge_base_name=uid, doc_name=file_name_hash)
            os.makedirs(os.path.dirname(local_file_path), exist_ok=True)
            # In training mode the user's input is the answer to the pet's question.
            summary = await summarize_chat(query=pet_query, temperature=TEMPERATURE, prompt_name="summarize",
                                           answer=query)
            with open(local_file_path, "w+", encoding="utf-8") as f:
                f.write(summary)
            upload_docs_method(knowledge_base_name=uid, local_file_paths=[local_file_path], chunk_size=1000,
                               override=True, file_hash_names=[file_name_hash],
                               file_original_names=[pet_query + ".txt"])
            logger.info(f"uid: {uid} , pet_input: {pet_query} , user_input: {query}, summary: {summary}")
        else:
            result = "2"  # "2" == not a direct answer; nothing is stored
            summary = ""
            logger.warning(f"uid: {uid} , pet_input: {pet_query} , user_input: {query} 用户没有正面回答宠物问题")
        return BaseResponse(code=200, msg="训练成功", data={
            "uid": uid,
            "pet_input": pet_query,
            "user_input": query,
            "summary": summary,
            "relevant": result
        })
    # --- regular chat: load recent history for this user (best-effort) ---
    try:
        histories = get_message_by_user_id_chat_with_own_pet(user_id=uid, history_len=HISTORY_LEN)
    except Exception as e:
        logger.error(f"Failed to get chat history: {e}")
        histories = []

    # Map the occupation to its dedicated knowledge base; empty when unknown.
    occupation_kb_name = OCCUPATION_MAPPING.get(occupation, "")

    # --- intention recognition ---
    data_content = None
    try:
        instruction = await llm_chat(query, "Intention_Recognition_Expert_Pet", 0.1)
    except Exception as e:
        logger.error(f"Failed to recognize the instruction: {e}")
        instruction = "-1"  # sentinel: no instruction recognized
    city = ""
    district = ""
    if instruction in INSTRUCTIONS:
        answer = None
        request_id = "I_" + request_id
        if instruction == "5":
            # Instruction "5" needs a location ("city[,district]") extracted
            # from the query; extraction failure falls back to normal chat.
            try:
                info = await llm_chat(query, "Extract", 0.1)
                if info != "缺少信息":
                    info = info.split(",")
                    if len(info) == 1:
                        city = info[0]
                        district = ""
                    if len(info) == 2:
                        city = info[0]
                        district = info[1]
                if district == "None":
                    district = ""
            except Exception as e:
                logger.error(f"Failed to extract the information: {e}")
                response = await knowledge_base_chat(uid=uid,
                                                     query=query,
                                                     stream=stream,
                                                     history=histories,
                                                     knowledge_base_name=knowledge_base_name,
                                                     request_id=request_id,
                                                     score_threshold=SCORE_THRESHOLD,
                                                     top_k=VECTOR_SEARCH_TOP_K,
                                                     model_name=LLM_MODELS[0],
                                                     temperature=TEMPERATURE,
                                                     max_tokens=MAX_TOKENS,
                                                     request=request,
                                                     prompt_name=prompt_name,
                                                     nickname=nickname,
                                                     character=character,
                                                     occupation=occupation,
                                                     occupation_kb=occupation_kb_name)
                return EventSourceResponse(response) if stream else StreamingResponse(response)

        # Canned instruction reply (answer stays None; the event carries the
        # instruction id plus any extracted location).
        event = create_chat_response_event(response_text=answer, instruction=instruction, request_id=request_id,
                                           finish=True, data_content=data_content, city=city, district=district)
        responses = EventSourceResponse(event) if stream else StreamingResponse(event)
        log_content(uid, query, answer, instruction=instruction, request_id=request_id, city=city, district=district,
                    chat_type="knowledge_base_chat")
        return responses

    else:
        # --- no recognized instruction: plain knowledge-base chat ---
        request_id = "D_" + request_id
        response = await knowledge_base_chat(uid=uid,
                                             query=query,
                                             stream=stream,
                                             history=histories,
                                             knowledge_base_name=knowledge_base_name,
                                             request_id=request_id,
                                             score_threshold=SCORE_THRESHOLD,
                                             top_k=VECTOR_SEARCH_TOP_K,
                                             model_name=LLM_MODELS[0],
                                             temperature=TEMPERATURE,
                                             max_tokens=MAX_TOKENS,
                                             request=request,
                                             prompt_name=prompt_name,
                                             nickname=nickname,
                                             character=character,
                                             occupation=occupation,
                                             occupation_kb=occupation_kb_name)
        return EventSourceResponse(response) if stream else StreamingResponse(response)
