import asyncio
import copy
import json
import logging
from typing import Any, Dict, List

import uvicorn
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
from langchain_core.tools import tool
from langchain_core.messages.ai import AIMessageChunk, ToolCallChunk
from langchain_core.messages.tool import ToolMessage
from pydantic import BaseModel, Field

from insight_agent.mcp_client.frontend_service.agent_generator import AgentGenerator
from insight_agent.mcp_client.frontend_service.configuration import Configuration


# ========================= Module-level state =========================
# Base configuration: URLs, model names, prompt paths, response templates.
cfg = Configuration()
# # Insight Request JSON
# insight_request = {
#     "keywords": [],
#     "user_latest_request": "",
#     "user_request_summary": ""
# }
# Insight Result JSON — latest per-mode results uploaded by the sub-agents.
# 'current_modes' lists which agents contributed to the CURRENT insight round
# (entries are consumed by distinguish_mode_data); each agent entry holds that
# mode's textual summary and raw data payload.
insight_result = {
    "current_modes": [],
    "agent_graph":{
        "summary": "",
        "data": None
    },
    "agent_map":{
        "summary": "",
        "data": None
    },
    "agent_time_series":{
        "summary": "",
        "data": None
    },
    "agent_statistics":{
        "summary": "",
        "data": None
    }
}

# # ========================= 创建 LangGraph 工具 - 更新 Insight Request =========================
# async def update_insight_request(keywords: set):
#     '''
#     提取用户输入中的业务关键词，包括语义相近的关键词，形成一个无重复元素的集合 keywords，并更新 “洞察输入” insight_request
#     insight_request 保存了用户历史输入的关键词、最新的输入、历史输入的总结
#     :param keywords: 业务关键词的集合
#     :return: insight_request
#     '''
#     for keyword in keywords:
#         if keyword not in insight_request["keywords"]:
#             insight_request["keywords"].append(keyword)
#     # await main_agent.ainvoke(
#     #     {"messages": [{"role": "system", "content": f'用户洞察输入insight_request更新为:{format(insight_request)}'}]},
#     #     cfg.thread_config_2
#     # )
#     print(f'insight_request更新为:{insight_request}')


# ========================= Main-control agent setup =========================
# Agent generator: builds agents from prompt files + MCP server configs.
agent_generator = AgentGenerator()
# LangGraph internal tools (currently none registered).
internal_tools = []
# Build the Insight agents.
# NOTE(review): each asyncio.run() creates and tears down its own event loop;
# any loop-bound resource opened inside these factories (HTTP clients,
# sessions) would be tied to a dead loop — confirm the generators only build
# loop-independent graph objects here.
parallel_main_agent = asyncio.run(agent_generator.create_parallel_tool_call_main_agent(cfg.llm_model_name,
                                                                                       cfg.main_agent_mcp_servers_config_path,
                                                                                       internal_tools))
click_to_ask_agent = asyncio.run(agent_generator.create_click_to_ask_agent(cfg.llm_model_name,
                                                                   cfg.click_to_ask_agent_prompt_file_path))
main_agent = asyncio.run(agent_generator.create_main_agent(cfg.llm_model_name,
                                                           cfg.main_agent_prompt_file_path,
                                                           cfg.main_agent_mcp_servers_config_path,
                                                           internal_tools))
# Assistant agent used for text summarization.
assistant_agent = asyncio.run(
    agent_generator.create_assistant_agent(cfg.llm_model_name,
                                           '''<角色>你是一名文字总结助理</角色>
                                           <目标>总结内容但回答总字数不超过原内容字数</目标>'''))
# Queue carrying the agent's thinking-chain chunks to the client.
thinking_response_queue = asyncio.Queue()
# Queue carrying the agent's formal answer chunks to the client.
formal_response_queue = asyncio.Queue()
# Fixed opener for the Insight Agent thinking chain.
insight_agent_thinking_start = '正在思考中...'
# HTML head/tail wrapped around thinking text for special styling.
thinking_response_head_1 = "<em><span style='color: #d9d9d9;'>"
thinking_response_tail_1 = "</span></em><br>"
# Strong references to in-flight tasks so they are not garbage-collected.
thinking_response_async_tasks = set()
formal_response_async_tasks = set()


# # ========================= 分线程大模型应用 =========================
# async def summarize_info(input):
#     print(f'✅用户历史输入总结: {input}')
#     result = await assistant_agent.ainvoke(
#         {"messages": [{"role": "system", "content": f'总结：“{input}”，字数限制：{round(len(input)*1.2)}'}]},
#         cfg.thread_config_2
#     )
#     response_content = result['messages'][-1].content
#     print(f'✅用户历史输入总结更新: {response_content}')
#     return response_content


# ========================= Service and helper functions =========================
# FastAPI app serving both the frontend pages and the sub-agent callback endpoints.
front_service = FastAPI()

async def clean_up_queue(queue):
    """ 安全清空异步队列 """
    try:
        while not queue.empty():
            try:
                queue.get_nowait()   # 非阻塞方式获取
            except asyncio.QueueEmpty:
                break
    except Exception:
        pass  # 暂忽略清理过程中的异常


# ========================= 【前后端】导览页 - 导览页服务 =========================
@front_service.post(cfg.guide_page_post_url)
async def respond_guide_page_post():
    """Serve the guide-page response template.

    Renamed from ``respond_insight_time_series_post`` (a copy-paste slip)
    which collided with — and was shadowed by — the insight time-series
    handler of the same name defined later in this module. The route path
    is unchanged, so clients are unaffected.
    """
    response_data = {
        "code": "200",
        "msg": "操作成功！",
        "data": cfg.guide_page_response_template.copy()  # lets the frontend pre-debug the insight flow
    }
    return response_data


# ========================= 【前后端】自由探索页 - 图谱模式服务 =========================
class Vertex(BaseModel):
    """A graph vertex: its label, unique id, and arbitrary properties."""
    label: str
    id: str
    properties: Dict[str, Any]

class Edge(BaseModel):
    """A graph edge between source (srcId) and destination (dstId) vertices."""
    label: str
    id: str
    srcId: str
    dstId: str
    properties: Dict[str, Any]

class ResultSet(BaseModel):
    """Tabular query result: column headers plus rows of cell dicts."""
    header: List[str]
    table: List[List[Dict[str, Any]]]

class Data(BaseModel):
    """Payload of a graph query response: result table, vertex/edge sets, and counters."""
    resultSet: ResultSet
    vertexSet: Dict[str, Vertex]
    edgeSet: Dict[str, Edge]
    profile: Dict[str, float]
    errors: List[Any]
    vertexCount: int
    edgeCount: int
    resultCount: int

class Response(BaseModel):
    """Envelope for a graph query response.

    NOTE(review): this model (and the ones above) is not referenced by any
    visible handler — presumably documents the graph endpoint's schema.
    """
    code: str
    msg: str
    data: Data

@front_service.post(cfg.graph_post_url)
async def respond_graph_post():
    """Serve the graph-mode template for the free-exploration page."""
    return {
        "code": "200",
        "msg": "操作成功！",
        "data": cfg.graph_response_template.copy(),
    }


# ========================= 【前后端】自由探索页 - 图谱模型查询框服务 =========================
@front_service.post(cfg.graph_searchbox_series_post_url)
async def respond_graph_searchbox_post():
    """Serve the graph-mode search-box template for the free-exploration page."""
    payload = cfg.graph_searchbox_response_template.copy()
    return {"code": "200", "msg": "操作成功！", "data": payload}


# ========================= 【前后端】自由探索页 - 地图模式服务 =========================
@front_service.post(cfg.map_post_url)
async def respond_map_post():
    """Serve the map-mode template for the free-exploration page."""
    response = {"code": "200", "msg": "操作成功！"}
    response["data"] = cfg.map_response_template.copy()
    return response


# ========================= 【前后端】自由探索页 - 统算模式服务 =========================
@front_service.post(cfg.stats_post_url)
async def respond_stats_post():
    """Serve the statistics-mode template for the free-exploration page."""
    template = cfg.stats_response_template.copy()
    return {"code": "200", "msg": "操作成功！", "data": template}


# ========================= 【前后端】自由探索页 - 时序模式服务 =========================
@front_service.post(cfg.time_series_post_url)
async def respond_time_series_post():
    """Serve the time-series-mode template for the free-exploration page."""
    return dict(code="200", msg="操作成功！", data=cfg.time_series_response_template.copy())


# ========================= 【前后端】自由探索页 - AI点问服务 =========================
class ClickToAskRequest(BaseModel):
    """Request body for the AI click-to-ask endpoint."""
    question: str
    conversationId: str
    stream: bool

@front_service.post(cfg.click_to_ask_post_url)
async def respond_click_to_ask_post(user_request: ClickToAskRequest):
    """Answer an AI click-to-ask request with a streamed chart explanation."""
    user_input = user_request.question.strip()
    print(user_input)

    async def event_generator():
        try:
            # Stream a fixed opening sentence, character by character,
            # wrapped in the styled thinking head/tail.
            opening = [thinking_response_head_1]
            opening.extend("图表解读中...\n\n")
            opening.append(thinking_response_tail_1)
            for piece in opening:
                yield piece
                await asyncio.sleep(0.03)
            # Then relay the agent's token stream.
            agent_stream = click_to_ask_agent.astream(
                {"messages": [{"role": "user", "content": user_input}]},
                cfg.thread_config_3,
                stream_mode='messages'
            )
            async for chunk in agent_stream:
                print(chunk[0].content, end='')
                yield chunk[0].content
        except Exception as exc:
            yield json.dumps({"code": "500", "msg": f"出错: {exc}", "data": {}}) + "\n"

    return StreamingResponse(event_generator(), media_type="application/x-ndjson")



# ========================= 【前后端】融合洞察页 - 思维链响应服务 =========================

class ChatThinkingRequest(BaseModel):
    """Request body for the thinking-chain streaming endpoint."""
    question: str
    conversationId: str
    stream: bool

@front_service.post(cfg.chat_thinking_post_url)
async def respond_chat_thinking_post(user_request: ChatThinkingRequest):
    """Stream the thinking chain for a user chat request.

    Relays character chunks from the module-level ``thinking_response_queue``
    to the client until a ``None`` end marker appears, then waits for all
    registered thinking-producer tasks to settle.
    """
    user_input = user_request.question.strip()

    async def event_generator():
        try:
            # Drain stale chunks left over from a previous request.
            await clean_up_queue(thinking_response_queue)
            # Seed the chain with the fixed "thinking..." opener.
            thinking_start_task = asyncio.create_task(
                queue_thinking_response(insight_agent_thinking_start,
                                        thinking_response_head=thinking_response_head_1,
                                        thinking_response_tail=thinking_response_tail_1,
                                        add_blank_line=True)
            )
            # Strong reference so the task is not garbage-collected mid-flight.
            thinking_response_async_tasks.add(thinking_start_task)
            print('\n🤔 Add thinking_start_task', thinking_response_async_tasks)
            # Relay queued thinking chunks to the client.
            while True:
                chunk = await thinking_response_queue.get()
                if chunk is None:
                    thinking_response_queue.task_done()
                    print('\n🤔 Task done', thinking_response_async_tasks)
                    break  # stop at the end-of-thinking marker
                await asyncio.sleep(0.05)  # pace the character stream
                yield chunk
                # thinking_response_queue.task_done()
            await asyncio.wait(thinking_response_async_tasks)
        except Exception as exc:
            yield json.dumps({"code": "500", "msg": f"出错: {exc}", "data": {}}) + "\n"
        finally:
            # NOTE(review): this clears the FORMAL response queue, not the
            # thinking queue this handler consumes — confirm intended.
            await clean_up_queue(formal_response_queue)

    return StreamingResponse(event_generator(), media_type="application/x-ndjson")


# ========================= 【前后端】融合洞察页 - 洞察回答响应服务 =========================
class ChatRequest(BaseModel):
    """Request body for the main chat (insight answer) endpoint."""
    question: str
    conversationId: str
    stream: bool

@front_service.post(cfg.chat_post_url)
async def respond_chat_post(user_request: ChatRequest):
    """Answer a user chat request, streaming when the client asks for it."""
    user_input = user_request.question.strip()
    if not user_request.stream:
        return await get_normal_answer(user_input)
    headers = {
        'Content-Type': 'application/x-ndjson',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive'
    }
    return await get_stream_answer(user_input, headers)


async def queue_thinking_response(thinking_content: str,
                                  thinking_response_head='', thinking_response_tail='',
                                  add_blank_line: bool=False, end_thinking: bool=False):
    """Push one thinking step into the async thinking queue as a character stream.

    The content is split into single characters bracketed by the optional
    head/tail markup; a trailing '<br>' is appended when requested, and a
    ``None`` end marker is queued when *end_thinking* is set.
    """
    try:
        pieces = [thinking_response_head]
        pieces.extend(thinking_content)
        pieces.append(thinking_response_tail)
        if add_blank_line:
            pieces.append('<br>')
        for piece in pieces:
            await thinking_response_queue.put(piece)
    except Exception as exc:
        await thinking_response_queue.put(f"思考过程出错: {exc}")
    if end_thinking:
        await thinking_response_queue.put(None)  # end-of-thinking marker


async def queue_main_agent_response( user_input):
    """Stream the main agent's reply into the formal-response queue.

    Runs the parallel main agent on *user_input* and fans its output out:
    visible answer text goes to ``formal_response_queue``; tool-call progress
    events become thinking-chain entries via ``queue_thinking_response``.
    Both queues always receive a terminating ``None`` marker in ``finally``.

    :param user_input: the user's question text
    """
    try:
        prompt = AgentGenerator.load_prompt(cfg.main_agent_prompt_file_path)

        # Stream both token-level messages and node-level graph updates.
        async for chunk in parallel_main_agent.astream(
                {"messages": [{"role": "system", "content": prompt}, {"role": "user", "content": user_input}]},
                cfg.thread_config_1,
                stream_mode=['messages', 'updates']):
            if chunk[0] == 'messages':
                if isinstance(chunk[1][0], AIMessageChunk):
                    response_content = chunk[1][0].content
                    if response_content:
                        await formal_response_queue.put(chunk[1][0].content)
                    if chunk[1][0].tool_calls:
                        # Skip partial tool-call chunks that carry no name yet.
                        if not chunk[1][0].tool_calls[0]['name']:
                            continue
                        print(
                            f"\n🤔 根据用户输入的信息，Insight Agent 决定寻求协作: {[tc['name'] for tc in chunk[1][0].tool_calls]}")
                        # NOTE(review): nested same-quote f-strings below need Python 3.12+ (PEP 701) — confirm runtime version.
                        thinking_decision_task = asyncio.create_task(
                            queue_thinking_response(
                                f'🔍 根据用户的输入，Insight Agent 决定向 **{'、'.join([tc['name'] for tc in chunk[1][0].tool_calls])}** 寻求协作',
                                add_blank_line=True)
                        )
                        # Strong reference so the task is not garbage-collected mid-flight.
                        thinking_response_async_tasks.add(thinking_decision_task)
                        print('\n🤔 Add thinking_end_tool_call_task', thinking_response_async_tasks)
            elif chunk[0] == 'updates':
                if 'agent' in chunk[1].keys():
                    if chunk[1]['agent']['messages'][0].response_metadata['finish_reason'] == 'tool_calls':
                        # Add the collaborating-agent invocation to the thinking chain.
                        print('\n🤔 开始调用下列协作智能体：', '、'.join([tool['name'] for tool in chunk[1]['agent']['messages'][0].tool_calls]))
                        thinking_tool_call_task = asyncio.create_task(
                            queue_thinking_response(
                                f'💡 开始与智能体 **{'、'.join([tool['name'] for tool in chunk[1]['agent']['messages'][0].tool_calls])}** 协作',
                                add_blank_line=True)
                        )
                        thinking_response_async_tasks.add(thinking_tool_call_task)
                        print('\n🤔 Add thinking_tool_call_task', thinking_response_async_tasks)

                elif 'tools' in chunk[1].keys():
                    # Tool node finished: record the end of the collaboration.
                    print('\n🤔 结束调用协作智能体：', '、'.join([tool.name for tool in chunk[1]['tools']['messages']]))
                    thinking_end_tool_call_task = asyncio.create_task(
                        queue_thinking_response(
                            f'✅ 结束与智能体 **{'、'.join([tool.name for tool in chunk[1]['tools']['messages']])}** 协作',
                            add_blank_line=True)
                    )
                    thinking_response_async_tasks.add(thinking_end_tool_call_task)
                    print('\n🤔 Add thinking_end_tool_call_task', thinking_response_async_tasks)

    except Exception as exc:
        await formal_response_queue.put(json.dumps({"code": "500", "msg": f"主代理出错: {exc}", "data": {}}))
    finally:
        # End markers unblock both queue consumers.
        await thinking_response_queue.put(None)
        await formal_response_queue.put(None)  # end-of-answer marker


async def get_stream_answer(user_input, headers):
    """Produce a streaming answer fed by the main agent's response queue.

    :param user_input: the user's question text
    :param headers: extra response headers built by the caller
    :return: a StreamingResponse relaying queued answer chunks as ndjson
    """
    async def event_generator():
        try:
            # Reset the queue so stale chunks from a previous request
            # cannot leak into this stream.
            await clean_up_queue(formal_response_queue)

            # Kick off the main-agent task that feeds the queue.
            main_agent_task = asyncio.create_task(queue_main_agent_response(user_input))
            formal_response_async_tasks.add(main_agent_task)

            # Drain the queue until the None end marker.
            try:
                while True:
                    chunk = await asyncio.wait_for(formal_response_queue.get(), timeout=300)
                    if chunk is None:
                        break  # end-of-answer marker
                    print(chunk, end='')
                    yield chunk
            except asyncio.TimeoutError:
                yield json.dumps({"code": "408", "msg": "处理超时", "data": {}})

            await asyncio.wait(formal_response_async_tasks)

        except Exception as exc:
            yield json.dumps({"code": "500", "msg": f"出错: {exc}", "data": {}}) + "\n"
        finally:
            await clean_up_queue(formal_response_queue)

    # BUGFIX: the caller-supplied headers were previously ignored — forward them.
    return StreamingResponse(event_generator(), media_type="application/x-ndjson", headers=headers)


async def get_normal_answer(user_input):
    """Produce a non-streaming (batch) answer from the main agent.

    :param user_input: the user's question text
    :return: response envelope whose ``data`` field is a JSON-encoded string
    """
    result = await main_agent.ainvoke(
        {"messages": [{"role": "user", "content": user_input}]},
        cfg.thread_config_1
    )
    response_content = result['messages'][-1].content
    # BUGFIX: the template contains nested dicts, so a shallow .copy() let the
    # content assignment below mutate the shared cfg template across requests.
    response_data = copy.deepcopy(cfg.chat_response_template)
    response_data['choices'][0]['message']['content'] = response_content
    return {
        "code": "200",
        "msg": "操作成功！",
        "data": json.dumps(response_data)
    }


# ========================= 【前后端】融合洞察页 - 生成洞察 取得各模式当前洞察结果 =========================
def distinguish_mode_data(agent_type: str):
    """Pop *agent_type* from the current-round mode list and return its result.

    :param agent_type: key of the sub-agent, e.g. 'agent_graph'
    :return: {'summary', 'data'} for the agent, or None when that agent
             did not contribute to the current insight round
    """
    modes = insight_result['current_modes']
    if agent_type not in modes:
        return None
    modes.remove(agent_type)
    entry = insight_result[agent_type]
    return {
        "summary": entry['summary'],
        "data": entry['data']
    }


# ========================= 【前后端】融合洞察页 - 生成洞察 图谱服务 =========================
@front_service.post(cfg.insight_graph_post_url)
async def respond_insight_graph_post():
    """Return the graph-mode result of the current insight round (data is None when absent)."""
    return {
        "code": "200",
        "msg": "操作成功！",
        "data": distinguish_mode_data('agent_graph'),
    }


# ========================= 【前后端】融合洞察页 - 生成洞察 地图服务 =========================
@front_service.post(cfg.insight_map_post_url)
async def respond_insight_map_post():
    """Return the map-mode result of the current insight round (data is None when absent)."""
    payload = distinguish_mode_data('agent_map')
    return {"code": "200", "msg": "操作成功！", "data": payload}


# ========================= 【前后端】融合洞察页 - 生成洞察 时序服务 =========================
@front_service.post(cfg.insight_time_series_post_url)
async def respond_insight_time_series_post():
    """Return the time-series-mode result of the current insight round (data is None when absent)."""
    response = {"code": "200", "msg": "操作成功！"}
    response["data"] = distinguish_mode_data('agent_time_series')
    return response


# ========================= 【前后端】融合洞察页 - 生成洞察 统算服务 =========================
@front_service.post(cfg.insight_stats_post_url)
async def respond_insight_stats_post():
    """Return the statistics-mode result of the current insight round (data is None when absent)."""
    return dict(code="200", msg="操作成功！", data=distinguish_mode_data('agent_statistics'))


# # ========================= 【智能体】融合洞察页 - 用户输入获取服务 =========================
# class SubAgentRequest(BaseModel):
#     sub_agent_type: str
#     stream: bool
#
# @front_service.post('/agents/insight_request')
# async def respond_insight_request_post(subagent_request: SubAgentRequest):
#     print(f"💡协作智能体{subagent_request.sub_agent_type}申请获取用户的输入")
#     return {
#         "code": "200",
#         "msg": "操作成功！",
#         "insight_request": insight_request
#         # "insight_request": "洞察请求"
#     }


# ========================= 【智能体】融合洞察页 - 思维链收集服务 =========================
class SubAgentThinking(BaseModel):
    """One thinking step uploaded by a collaborating sub-agent."""
    sub_agent_type: str
    stream: bool
    thinking_step: str

@front_service.post('/agents/insight_thinking_steps')
async def respond_insight_thinking_post(sub_agent_thinking: SubAgentThinking):
    """Collect one thinking step from a sub-agent and feed it into the thinking chain.

    Renamed from ``respond_insight_result_post``, which collided with — and was
    shadowed by — the insight-result collector of the same name defined later
    in this module. The route path is unchanged, so clients are unaffected.
    """
    thinking_step = f'🤝 协作智能体 **{sub_agent_thinking.sub_agent_type}**：{sub_agent_thinking.thinking_step}'
    print('\n💡', thinking_step)
    thinking_step_task = asyncio.create_task(queue_thinking_response(thinking_step, add_blank_line=True))
    # Strong reference so the task is not garbage-collected mid-flight.
    thinking_response_async_tasks.add(thinking_step_task)
    print('\n💡Add thinking_step_task', thinking_response_async_tasks)
    return {
        "code": "200",
        "msg": "操作成功！",
        "insight_result": f"收到思考过程{sub_agent_thinking.thinking_step}"
    }


# ========================= 【智能体】融合洞察页 - 洞察结果收集服务 =========================
class SubAgentResult(BaseModel):
    """An updated insight result uploaded by a collaborating sub-agent."""
    sub_agent_type: str
    stream: bool
    updated_result: dict

@front_service.post('/agents/insight_result')
async def respond_insight_result_post(subagent_result: SubAgentResult):
    """Receive an updated insight result from a sub-agent and record it.

    Registers the agent in the current insight round, stores its result,
    and posts receive/merge entries to the thinking chain.
    """
    agent_type = subagent_result.sub_agent_type
    print(f"💡协作智能体{agent_type}申请上传洞察结果")
    # Register this agent as a contributor to the current insight round.
    insight_result['current_modes'].append(agent_type)
    # Store its freshly produced result.
    insight_result[agent_type] = subagent_result.updated_result
    # Thinking-chain entry: result received.
    receive_task = asyncio.create_task(
        queue_thinking_response(
            f"🤝 智能体 **{agent_type}** 向主智能体返回洞察结果",
            add_blank_line=True)
    )
    thinking_response_async_tasks.add(receive_task)
    print('\n💡 Add thinking_result_receive_task', thinking_response_async_tasks)
    print(f"💡协作智能体洞察结果更新")
    # Thinking-chain entry: result merged.
    update_task = asyncio.create_task(
        queue_thinking_response(
            f"📋️ 已汇总智能体 **{agent_type}** 的洞察结果",
            add_blank_line=True)
    )
    thinking_response_async_tasks.add(update_task)
    print('\n💡 Add thinking_result_update_task', thinking_response_async_tasks)

    return {
        "code": "200",
        "msg": "操作成功！",
        "insight_result": "收到洞察结果"
    }


if __name__ == "__main__":
    # Serve the FastAPI app on all interfaces, port 3000.
    uvicorn.run(front_service, host="0.0.0.0", port=3000)

