import json
import os
import random
import urllib.parse
from datetime import datetime

import pytz
from dashscope import Generation
from loguru import logger

from configs.kb_config import VECTOR_SEARCH_TOP_K, SCORE_THRESHOLD
from configs.logging_config import configure_logging
from server.chat.utils import generate_request_id
from server.utils import create_chat_response

# 配置日志
configure_logging()

DASHSCOPE_API_KEY = "sk-e68abfde4e7c455b902369dfad28c55f" # 请替换实际API Key
MODEL_NAME = "qwen-max" # 请替换实际模型名称


# 定义工具列表，模型在选择使用哪个工具时会参考工具的name和description
tools = [
    # 工具1 获取当前时刻的时间
    {
        "type": "function",
        "function": {
            "name": "get_current_time",
            "description": "当你想知道现在的时间时非常有用。",
            "parameters": {},  # 因为获取当前时间无需输入参数，因此parameters为空字典
        },
    },
    # 工具2 获取指定城市的天气
    {
        "type": "function",
        "function": {
            "name": "get_current_weather",
            "description": "当你想查询指定城市的天气时非常有用",
            "parameters": {
                # 查询天气时需要提供位置，因此参数设置为location
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "城市或县区，比如北京、杭州、余杭等，不需要带上“市”、“区”等字样",
                    }
                },
                "required": ["location"],
            },
        },
    },
]


# 真实天气查询工具（需要替换自己的AppCode）
def get_current_weather(location):
    import urllib3
    host = 'https://getweather.market.alicloudapi.com'
    path = '/lundear/weather7d'
    query = f'areaCn={urllib.parse.quote(location)}'  # 仅保留必要参数
    app_code = "bdff0239f15748528998b966ffaf4f02" # 请替换实际AppCode
    url = f"{host}{path}?{query}"

    http = urllib3.PoolManager()
    headers = {'Authorization': f'APPCODE {app_code}'}  # 请替换实际AppCode

    try:
        response = http.request('GET', url, headers=headers)
        data = json.loads(response.data.decode('utf-8'))
        # 在返回前添加数据校验
        valid_data = data.get("data")

        if not valid_data:
            logger.warning("天气API返回数据结构异常")
            return "获取的天气数据格式有误，请稍后再试"

        if data.get("code") == 0:  # 成功状态码为int 0
            now = data["data"].get("now", {})
            city_info = data["data"].get("cityInfo", {})
            days = [data["data"].get(f"d{i}") for i in range(1,4)]  # 获取未来3天预报
            
            if now:
                # 实时天气部分
                current_weather = (
                    f"{city_info.get('areaCn', location)}实时天气：\n"
                    f"天气状况：{now.get('weather', '未知')}\n"
                    f"当前温度：{now.get('temp', '未知')}℃\n"
                    f"湿度：{now.get('SD', '未知').replace('%', '')}%\n"
                    f"风速：{now.get('wse', '未知')}m/s\n"
                    f"风向：{now.get('WD', '未知')}\n"
                )
            else:
                current_weather = ""
            # 未来三天预报
            forecast = "\n未来三天天气预报："
            for i, day in enumerate(days[:3], 1):
                if day:
                    forecast += (
                        f"\n第{i}天（{day.get('time', '')}）：\n"
                        f"天气：{day.get('weather', '未知')}\n"
                        f"温度：{day.get('temperature_min', '未知')} ~ {day.get('temperature_max', '未知')}℃\n"
                        f"风力：{day.get('wind_pow', '未知')}\n"
                    )
            advice = ""
            # 生活指数建议
            if life_index := days[0].get("lifeIndex"):
                advice = "\n生活指数建议："
                for index, details in life_index.items():
                    if isinstance(details, dict):
                        advice += f"\n{index}：{details.get('state', '')}（{details.get('reply', '')}）"
            
            return current_weather + forecast + advice
        else:
            logger.error(f"获取天气失败：{data.get('desc', '未知错误')}（错误码：{data.get('code')}）")
            return f"获取天气信息失败，请稍后再试"
    except json.JSONDecodeError as e:
        logger.error(f"天气数据解析失败，请检查API响应格式：{str(e)}")
        # 返回的这句话是要给到大模型的
        return "获取天气信息失败，请稍后再试"
    except KeyError as e:
        logger.error(f"天气数据字段缺失：{str(e)}")
        # 返回的这句话是要给到大模型的
        return f"获取天气信息失败，请稍后再试"
    except Exception as e:
        logger.error(f"天气查询异常：{str(e)}")
        # 返回的这句话是要给到大模型的
        return f"获取天气信息失败，请稍后再试"


# 查询当前时间的工具。返回结果示例：“当前时间：2024-04-15 17:15:18。“
def get_current_time():
    tz_beijing = pytz.timezone("Asia/Shanghai")
    # 获取当前日期和时间
    current_datetime = datetime.now(tz_beijing)
    # 格式化当前日期和时间
    formatted_time = current_datetime.strftime("%Y-%m-%d %H:%M:%S")
    # 返回格式化后的当前时间
    return f"当前时间：{formatted_time}。"




# 封装模型响应函数
def get_response(messages, model_name=MODEL_NAME, stream=True, result_format="message", tools=[], temperature=0.3, parallel_tool_calls=False):
    response = Generation.call(
        # 若没有配置环境变量，请用百炼API Key将下行替换为：api_key="sk-xxx",
        api_key=DASHSCOPE_API_KEY,
        model=model_name,  # 模型列表：https://help.aliyun.com/zh/model-studio/getting-started/models
        messages=messages,
        tools=tools,
        temperature=temperature,
        seed=random.randint(
            1, 10000
        ),  # 设置随机数种子seed，如果没有设置，则随机数种子默认为1234
        result_format=result_format,  # 将输出设置为message形式
        parallel_tool_calls=parallel_tool_calls,
        stream=stream,
        incremental_output=True if stream else False
    )
    return response


from fastapi import FastAPI, Body, Request, Depends
from sse_starlette.sse import EventSourceResponse
from sqlalchemy.orm import Session
from server.db.session import get_db
from configs.model_config import HISTORY_LEN
from server.utils import BaseResponse, decode_verify_token
from configs.kb_config import GO_SERVICE_SECRET_KEY
from configs.other_config import ALGORITHM
from configs.model_config import EMBEDDING_MODEL
from server.db.repository.message_repository import add_message_to_mars_training_mode_kb_chat_db, get_message_by_user_id_chat_with_own_pet, add_message_to_mars_chat_with_own_pet_db
from server.knowledge_base.kb_service.base import KBServiceFactory
from server.knowledge_base.utils import get_file_path
from server.knowledge_base.kb_doc_api import upload_docs_method
from server.utils import create_file_name_hash
from server.db.repository.knowledge_file_repository import get_file_name_hash_from_db, file_exists_in_db_
from server.knowledge_base.kb_api import create_kb
from server.knowledge_base.kb_doc_api import search_docs
from fastapi.concurrency import run_in_threadpool
from configs.other_config import OCCUPATION_MAPPING
knowledge_base_name = "Mars"
import os
app = FastAPI()
@app.post("/chat/new_func_call2")
async def call_with_messages(uid: str = Body(..., description="用户ID"),
                             query: str = Body(..., description="用户输入", examples=["恼羞成怒"]),
                             pet_query: str = Body("", description="宠物输入", examples=["恼羞成怒"]),
                             stream: bool = Body(False, description="流式输出"),
                             nickname: str = Body(..., description="用户昵称"),
                             character: str = Body(..., description="用户性格"),
                             occupation: str = Body("", description="用户职业"),
                             whether_train: bool = Body(False, description="是否智能训练"),
                             db: Session = Depends(get_db),
                             request: Request = None):
    request_id = generate_request_id()

    # 解码token
    try:
        token = decode_verify_token(request, GO_SERVICE_SECRET_KEY, ALGORITHM)
    except Exception as e:
        return BaseResponse(code=401, msg="Token is invalid", data={"error": str(e.detail.get('msg'))})
    # 要求query 非空
    query = query.strip()
    if pet_query == "" and query == "":
        return BaseResponse(code=400, msg="用户输入和宠物输入不能同时为空")
    if pet_query != "" and not whether_train:
        return BaseResponse(code=400, msg="不训练时，宠物不用输入")
    create_kb(knowledge_base_name=uid, vector_store_type="faiss", embed_model=EMBEDDING_MODEL)
    if whether_train and pet_query == "" and query != "":

        try:
            request_id = "Training_" + request_id
            add_message_to_mars_training_mode_kb_chat_db(uid, request_id, chat_type="training_mode_chat",
                                                         user_input=query, instruction="-1")
        except Exception as e:
            logger.error(f"保存用户的输入到db失败: {e}")
            raise f"保存用户的输入到db失败: {e}"
        if file_exists_in_db_(kb_name=uid, filename=query + ".txt"):
            file_name_hash = get_file_name_hash_from_db(kb_name=uid, filename=query + ".txt")["file_name_hash"]
            if file_name_hash:
                local_file_path = get_file_path(knowledge_base_name=uid, doc_name=file_name_hash)
            else:
                file_name_hash = create_file_name_hash(query) + ".txt"
                local_file_path = get_file_path(knowledge_base_name=uid, doc_name=file_name_hash)
        else:
            file_name_hash = create_file_name_hash(query) + ".txt"
            local_file_path = get_file_path(knowledge_base_name=uid, doc_name=file_name_hash)
        os.makedirs(os.path.dirname(local_file_path), exist_ok=True)
        with open(local_file_path, "w+", encoding="utf-8") as f:
            f.write(query)
        try:
            upload_docs_method(knowledge_base_name=uid, local_file_paths=[local_file_path], chunk_size=1000,
                               override=True, file_hash_names=[file_name_hash], file_original_names=[query + ".txt"])
            logger.info(f"用户：{uid} 成功上传了爱心文档：{query}")
            return BaseResponse(code=200, msg="上传爱心文档成功")
        except Exception as e:
            logger.error(f"用户：{uid} 上传爱心文档失败：{query} 失败原因：{e}")
            return BaseResponse(code=500, msg="上传爱心文档失败", data={"error": str(e)})
    if whether_train and pet_query != "" and query != "":
        messages = [
            {
                "role": "system",
                "content": """## 验证规则\n
            1. 相关性检测：分析回答是否直接关联NPC的问题主题\n
            2. 回避识别：标记"我不知道/随便/不记得"等通用回避语句\n
            3. 内容充实度：有效回答应包含至少1个具体细节或事例\n
            4. 异常模式：检测诗歌曲词、乱码、外语等非正常回答形式\n
            ## 重要\n
            - 记住：1表示用户是在回答NPC的问题，2表示用户不是在回答NPC的问题\n
            ## 返回\n
            必须以JSON格式返回，格式为{"answer": ""}，不允许输出其他内容
            """
            },
            {
                "role": "user",
                "content": f"宠物输入: {pet_query}\n用户回答: {query}"
            }
        ]
        response = get_response(messages, stream=False, result_format="json_object")
        if response.status_code == 200:
            code = 200
            json_data = json.loads(response.output.text)
            result = json_data["answer"]
        else:
            logger.error(f"获取用户回答是否是正面回答宠物问题失败: {response}")
            code = 500
            result = "2"
        if result == "1":
            try:
                request_id = "Training_" + request_id
                add_message_to_mars_training_mode_kb_chat_db(uid, request_id, chat_type="training_mode_chat",
                                                             pet_input=pet_query, user_input=query, instruction="-1")
            except Exception as e:
                logger.error(f"保存用户和宠物的输入到db失败: {e}")
                raise f"保存用户和宠物的输入到db失败: {e}"
            if file_exists_in_db_(kb_name=uid, filename=pet_query + ".txt"):
                file_name_hash = get_file_name_hash_from_db(kb_name=uid, filename=pet_query + ".txt")["file_name_hash"]
                if file_name_hash:
                    local_file_path = get_file_path(knowledge_base_name=uid, doc_name=file_name_hash)
                else:
                    file_name_hash = create_file_name_hash(pet_query) + ".txt"
                    local_file_path = get_file_path(knowledge_base_name=uid, doc_name=file_name_hash)
            else:
                file_name_hash = create_file_name_hash(pet_query) + ".txt"
                local_file_path = get_file_path(knowledge_base_name=uid, doc_name=file_name_hash)
            os.makedirs(os.path.dirname(local_file_path), exist_ok=True)
            # 训练模式下，用户的输入就是在回答宠物的问题
            messages = [
                {
                    "role": "system",
                    "content": """请总结以下问答，并提取其中的关键点。我需要上传到知识库，所以要求阐述出核心信息，忽略不重要的细节。请以简洁且清晰的方式阐明问答中的关键信息。\n
                        例如：\n
                        问题：你会经常利用业余时间学习新技能吗？\n
                        答案：是的，我会利用业余时间学习新技能。\n
                        总结：会利用业余时间学习新技能。\n
                        必须以JSON格式返回，格式为{"summary": ""}，不允许输出其他内容
                    """
                },
                {
                    "role": "user",
                    "content": f"问题：{pet_query}\n答案：{query}"
                }
            ]
            summary = get_response(messages, stream=False, result_format="json_object")
            if summary.status_code == 200:
                json_data = json.loads(summary.output.text)
                summary = json_data["summary"]
                with open(local_file_path, "w+", encoding="utf-8") as f:
                    f.write(summary)
                upload_docs_method(knowledge_base_name=uid, local_file_paths=[local_file_path], chunk_size=1000,
                                override=True, file_hash_names=[file_name_hash],
                                file_original_names=[pet_query + ".txt"])
                code = 200
                logger.info(f"uid: {uid} , pet_input: {pet_query} , user_input: {query}, summary: {summary}")
            else:
                logger.error(f"总结问答失败: {summary}")
                code = 500
                summary = ""
        else:
            result = "2"
            summary = ""
            logger.warning(f"uid: {uid} , pet_input: {pet_query} , user_input: {query} 用户没有正面回答宠物问题")
        return BaseResponse(code=code, msg="训练成功", data={
            "uid": uid,
            "pet_input": pet_query,
            "user_input": query,
            "summary": summary,
            "relevant": result
        })
    
    if occupation in OCCUPATION_MAPPING:
        occupation_kb = OCCUPATION_MAPPING[occupation]
    else:
        occupation_kb = ""
    mars_kb = KBServiceFactory.get_service_by_name(knowledge_base_name)
    if mars_kb is None:
        create_kb(knowledge_base_name=knowledge_base_name, vector_store_type="faiss", embed_model=EMBEDDING_MODEL)
        logger.info(f"由于用户在与自己宠物对话时，MarsAPP的知识库不存在，所以在此创建知识库: {knowledge_base_name}")

    user_kb = KBServiceFactory.get_service_by_name(uid)
    if user_kb is None:
        create_kb(knowledge_base_name=uid, vector_store_type="faiss", embed_model=EMBEDDING_MODEL)
        # TODO: 需要注意为什么知识库不存在
        logger.warning(f"由于用户在与自己宠物对话时，用户自己的知识库不存在，所以在此创建知识库: {uid}")

    
    mars_docs = await run_in_threadpool(search_docs,
                                       query=query,
                                       knowledge_base_name=knowledge_base_name,
                                       top_k=VECTOR_SEARCH_TOP_K,
                                       score_threshold=SCORE_THRESHOLD)
    user_docs = await run_in_threadpool(search_docs,
                                       query=query,
                                       knowledge_base_name=uid,
                                       top_k=VECTOR_SEARCH_TOP_K,
                                       score_threshold=SCORE_THRESHOLD)
    docs = mars_docs + user_docs
    context = "\n".join([doc.page_content for doc in docs])
    # 初始化messages
    messages = [
        {
            "role": "system",
            "content": f"""
            从现在起你的名字叫: {nickname}，性格: {character}，职业: {occupation}\n
            已知信息: {context}
            在回答问题时，要分析已知信息对问题有没有用，如果没有用，就不要借鉴已知信息
            """
        }
    ]
    # 获取用户的历史对话
    histories = get_message_by_user_id_chat_with_own_pet(uid, history_len=HISTORY_LEN)
    messages.extend(histories)
    messages.append({
        "role": "user",
        "content": query,
    })
    async def stream_response():
        final_answer = ""
        tool_param_value = ""
        # 第一次大模型调用
        first_generator = get_response(messages, stream=stream, tools=tools, temperature=0.1)
        need_tool_call = False
        for token in first_generator:
            data = json.dumps(token, ensure_ascii=False)
            if data.startswith("data:"):
                data = data[5:]
            data = json.loads(data)
            answer = data["output"]["choices"][0]["message"]
            if "tool_calls" not in answer:
                print(f"answer: {answer}\n")
                final_answer += answer["content"]
                yield create_chat_response(answer["content"], instruction="-1", request_id=request_id, finish=False, data_content="", code=200)
            else:
                need_tool_call = True
                print(f"answer: {answer}\n")
                if "name" in answer["tool_calls"][0]["function"]:
                    tool_name = answer["tool_calls"][0]["function"]["name"]
                    tool_id = answer["tool_calls"][0]["id"]
                else:
                    if "arguments" in answer["tool_calls"][0]["function"]:
                        tool_param_value = answer["tool_calls"][0]["function"]["arguments"]
                    else:
                        continue
        yield create_chat_response(final_answer, instruction="-1", request_id=request_id, finish=True, data_content="", code=200)

        if not need_tool_call:
            # 保存用户和宠物的对话到db
            add_message_to_mars_chat_with_own_pet_db(uid, request_id, chat_type="chat_with_own_pet",
                                                     query=query, response=final_answer, instruction="-1")
            return
        else:
            if tool_name == "get_current_weather":
                tool_param_value = tool_param_value.replace("'", "").replace('"', "").replace("{", "").replace("}", "")
                arguments = {"location": tool_param_value}
                tool_output = get_current_weather(tool_param_value)
                print(f"weather tool_output: {tool_output}\n")
            elif tool_name == "get_current_time":
                arguments = {}
                tool_output = get_current_time()
                print(f"time tool_output: {tool_output}\n")
            else:
                logger.error(f"未知的工具名称：{tool_name}")
                tool_output = "未知的工具名称，请稍后再试"
            tool_message = {
                "tool_call_id": tool_id,
                "role": "tool",
                "content": tool_output,
            }
            assistant_message = {
                "role": "assistant",
                "content": "",
                "tool_calls": [
                    {
                        "id": tool_id,
                        "function": {
                            "arguments": json.dumps(arguments, ensure_ascii=False),
                            "name": tool_name,
                        },
                        "type": "function",
                        "index": 0,
                    }
                ]
            }
            messages.append(assistant_message) # 将模型判断需要调用工具的回复添加到messages中
            messages.append(tool_message) # 将工具的输出添加到messages中
            # 当模型判断需要调用工具时，进行第二轮调用
            second_generator = get_response(messages, stream=stream, tools=tools, temperature=0.3)
            for token in second_generator:
                data = json.dumps(token, ensure_ascii=False)
                if data.startswith("data:"):
                    data = data[5:]
                data = json.loads(data)
                answer = data["output"]["choices"][0]["message"]["content"]
                final_answer += answer
                yield create_chat_response(answer, instruction="-1", request_id=request_id, finish=False, data_content="", code=200)
            # 保存用户和宠物的对话到db
            add_message_to_mars_chat_with_own_pet_db(uid, request_id, chat_type="chat_with_own_pet",
                                                     query=query, response=final_answer, instruction="-1")
            print(f"final_answer: {final_answer}\n")
            yield create_chat_response(final_answer, instruction="-1", request_id=request_id, finish=True, data_content="", code=200)
            
    
    return EventSourceResponse(stream_response())

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)