import os
import json

from dotenv import load_dotenv
from pathlib import Path

from langchain_openai import ChatOpenAI
from langgraph.graph import END

from insight_agent.mcp_server_graph.src.mcp_server_graph.utils import graph_parsing
from insight_agent.mcp_server_graph.src.mcp_server_graph.utils.cypher_state import CypherState
from insight_agent.mcp_server_graph.src.mcp_server_graph.utils.graph_parsing import simplify_for_explain
from insight_agent.mcp_server_graph.src.mcp_server_graph.utils.graph_service import GraphService
from insight_agent.mcp_server_graph.src.mcp_server_graph.utils.http_post import *
from insight_agent.mcp_server_graph.src.mcp_server_graph.utils.neo4j_util import Neo4jDriver
from insight_agent.mcp_server_graph.src.mcp_server_graph.utils.timer_util import Timer

# Load environment variables (MODEL, BASE_URL, LLM_API_KEY, Neo4j settings).
load_dotenv()

# Rebase all template/script paths onto the package root, wherever this file
# lives under the 'insight_agent' directory.
# NOTE(review): raises ValueError if the path does not contain 'insight_agent'.
base_path = str(Path(__file__).resolve().parent)
base_path = base_path[:base_path.index('insight_agent')] + "insight_agent/mcp_server_graph/src/mcp_server_graph"

# Initialise the StateGraph; the state schema declares the state type in use.

# Prompt/response templates for the preparation (schema-matching) step.
CYPHER_PREPARE_PROMPT_PATH = f"{base_path}/prompt/prepare_for_cypher_prompt.txt"
GRAPH_SCHEMA_PATH = f"{base_path}/prompt/graph_schema.json"
SAMPLE_RESPONSE_FORMAT_PATH = f"{base_path}/prompt/sample_response_format.json"
# Shared Neo4j connection used by every node in this module.
neo4j_driver = Neo4jDriver()

# Prompt/response templates for the Cypher-generation step.
CYPHER_GENERATE_PROMPT_PATH = f"{base_path}/prompt/cypher_generation_prompt.txt"
CYPHER_GENERATE_RESPONSE_PATH = f"{base_path}/prompt/cypher_generation_response.json"

# Reference script also fed to the generation prompt as an example.
GROUP_PROFILE_CYPHER_PATH = f"{base_path}/scripts/group_profile.cypher"

# Canned Cypher scripts for scenarios 1-12 (see direct_search for the order).
USER_CAR_FAM_CYPHER_PATH = f"{base_path}/scripts/user_car_fam.cypher"
NEW_CLIENT_CYPHER_PATH = f"{base_path}/scripts/new_client.cypher"
GROUP_OPERATION_CYPHER_PATH = f"{base_path}/scripts/group_operation.cypher"
FRAUD_DETECTION_CYPHER_PATH = f"{base_path}/scripts/fraud_detection.cypher"
PRODUCT_FOR_GROUP_CYPHER_PATH = f"{base_path}/scripts/product_for_group.cypher"
GROUP_FAM_QUERY_CYPHER_PATH = f"{base_path}/scripts/group_family_query.cypher"
FRAUD_TARGET_CYPHER_PATH = f"{base_path}/scripts/fraud_target.cypher"
COMPETITION_RECOMMEND_CYPHER_PATH = f"{base_path}/scripts/competition_recommend.cypher"
REGULATION_RECOMMEND_CYPHER_PATH = f"{base_path}/scripts/regulation_recommend.cypher"
SAME_INDUSTRY_RECOMMEND_CYPHER_PATH = f"{base_path}/scripts/same_industry_recommend.cypher"
SUPPLY_CHAIN_RECOMMEND_CYPHER_PATH = f"{base_path}/scripts/supply_chain_recommend.cypher"
UPGRADE_RECOMMEND_CYPHER_PATH = f"{base_path}/scripts/upgrade_recommend.cypher"

# Preconditions for cypher generation: schema selection and data sampling.

def _add_schema_sample_cypher(selects: list):
    """Resolve the LLM-selected node labels / edge types against the schema.

    ``selects`` is ``[node_labels, edge_types]``. Returns a stringified
    sub-schema plus one sampling Cypher statement per known edge type
    (nodes are covered implicitly by the edge samples).
    """
    all_nodes, all_edges = _parse_schema()
    picked_nodes = [all_nodes[label] for label in selects[0] if label in all_nodes]
    picked_edges, sample_cyphers = [], []
    for edge_type in selects[1]:
        if edge_type not in all_edges:
            continue
        edge = all_edges[edge_type]
        picked_edges.append(edge)
        sample_cyphers.append(
            f"match (e1)-[r:{edge_type}]->(e2) return e1, e2, r limit 10 //采样{edge['description']}边")
    select_schema = str({"node_types": picked_nodes, "edge_types": picked_edges})
    return select_schema, sample_cyphers


nodes, edges = {}, {}


def _parse_schema():
    """Load graph_schema.json once and index its node/edge definitions.

    Results are memoised in the module-level ``nodes``/``edges`` dicts, so the
    schema file is read at most once per process.
    """
    if nodes or edges:
        return nodes, edges
    schema = json.loads(_read_file(GRAPH_SCHEMA_PATH))
    for node in schema["node_types"]:
        nodes[node["label"]] = node
    for edge in schema["edge_types"]:
        edges[edge["type"]] = edge
    return nodes, edges


async def prepare_cypher(state: CypherState):
    """Ask the LLM to match the user question against the graph schema.

    Produces the selected sub-schema, sampling Cypher statements, the
    sampling rationale and a scenario code (0 = free-form generation,
    1-12 = canned scripts; routed by after_prepare_decide_next).
    """
    inform_insight_thinking("agent_graph", f"嗯，用户跟我说{state["user_question"]}，我需要对图数据进行采样，匹配用户意图")

    prompt = _read_file(CYPHER_PREPARE_PROMPT_PATH)
    graph_schema = _read_file(GRAPH_SCHEMA_PATH)
    sample_response_format = _read_file(SAMPLE_RESPONSE_FORMAT_PATH)
    empty = _is_empty(state)

    user_question = state["user_question"]

    # Only the last three cypher/result pairs are fed back to bound prompt size.
    prompt = prompt.format(user_question, graph_schema, sample_response_format, empty, state["preCypher"][-3:],
                           state["preCypherResult"][-3:])
    sample_gen_timer = Timer(f"大模型理解和采样(prompt长度{len(prompt)})")
    sample_gen_timer.start()
    response = await llm.ainvoke(prompt)
    data = response.content
    duration = sample_gen_timer.end()
    print(f"生成速度(结果长度{len(data)})：{len(data) / duration} token/s")
    # NOTE(review): this strips EVERY occurrence of the substring 'json', not
    # just a ```json fence — replies whose content contains "json" get mangled.
    data = data.replace('json', '').replace('```', '')
    datajson = json.loads(data)
    select_schema, sample_cyphers = _add_schema_sample_cypher(datajson["selectSchema"])
    inform_insight_thinking("agent_graph",
                            f"图数据schema中最匹配的点边类型是：{"、".join(datajson["selectSchema"][0])}，"
                            f"边类型是：{"、".join(datajson["selectSchema"][1])}"
                            )
    state["selectSchema"] = select_schema
    state["sampleCypher"] = sample_cyphers
    state["sampleReason"] = datajson["sampleReason"]
    state["scenario"] = datajson["scenario"]
    print(f"业务编码：{state["scenario"]}")
    inform_insight_thinking("agent_graph", "已提取出关键schema")
    # When the previous query returned nothing, count this pass as a retry.
    if empty and "emptyReason" in datajson:
        state["retry_count"] += 1
        inform_insight_thinking("agent_graph", f"前置查询为空, 原因分析：{datajson["emptyReason"]}")
        inform_insight_thinking("agent_graph", f"重试次数：{state["retry_count"]}")
    if state["sampleReason"] and len(state["sampleReason"]) > 0:
        inform_insight_thinking("agent_graph", f"采样逻辑：{state["sampleReason"]}")
        inform_insight_thinking("agent_graph", "正在采样图数据库")
    return {
        "selectSchema": state["selectSchema"],
        "scenario": state["scenario"],
        "sampleReason": state["sampleReason"],
        "sampleCypher": state["sampleCypher"],
        "retry_count": state["retry_count"],
        "error": state["error"]
    }


async def direct_search(state: CypherState):
    """Return the canned Cypher script for the recognised scenario.

    ``state["scenario"]`` is a 1-based index into the fixed script order
    below. Only the selected script file is read (the original read all
    twelve files on every call).

    Raises:
        IndexError: if ``scenario`` is outside 1..12 (same as before).
    """
    print("direct_search")
    script_paths = (
        USER_CAR_FAM_CYPHER_PATH,
        NEW_CLIENT_CYPHER_PATH,
        GROUP_OPERATION_CYPHER_PATH,
        FRAUD_DETECTION_CYPHER_PATH,
        PRODUCT_FOR_GROUP_CYPHER_PATH,
        GROUP_FAM_QUERY_CYPHER_PATH,
        FRAUD_TARGET_CYPHER_PATH,
        COMPETITION_RECOMMEND_CYPHER_PATH,
        REGULATION_RECOMMEND_CYPHER_PATH,
        SAME_INDUSTRY_RECOMMEND_CYPHER_PATH,
        SUPPLY_CHAIN_RECOMMEND_CYPHER_PATH,
        UPGRADE_RECOMMEND_CYPHER_PATH,
    )
    state["cypher"] = _read_file(script_paths[state["scenario"] - 1])
    return {"cypher": state["cypher"]}


async def sample_neo4j(state: CypherState):
    """Execute the sampling Cypher statements and accumulate their results.

    Raises:
        Exception: driver errors are recorded then re-raised, so the graph
        runtime sees the failure.
    """
    sample_neo4j_timer = Timer(f"Neo4j采样查询({len(state["sampleCypher"])}条语句)")
    sample_neo4j_timer.start()
    # Sampling statements live in sampleCypher so that preCypher entries are
    # not executed twice.
    for cypher in state["sampleCypher"]:
        print(f"执行cypher:{cypher}")
        try:
            # Run the query through the Neo4j driver.
            records, summary, keys = neo4j_driver.execute(cypher)
            state["preCypher"].append(cypher)
            state["preCypherResult"].append(graph_parsing.parse_graph(records, keys))
            state["error"] = None
            print(f"采样到的图数据：{state["preCypherResult"][-1]}")
            inform_insight_thinking("agent_graph", f"已采样图数据(长度{len(str(state["preCypherResult"][-1]))})")

        except Exception as e:
            # NOTE(review): on failure the error text is appended to
            # preCypherResult but the failing cypher is NOT appended to
            # preCypher, so the two lists can drift out of alignment.
            state["preCypherResult"].append([str(e)])
            print(f"查询发生报错：\n{e}")
            raise e
    sample_neo4j_timer.end()
    state["sampleCypher"] = []  # reset the sampling queue
    return {
        "sampleCypher": state["sampleCypher"],
        "preCypher": state["preCypher"],
        "preCypherResult": state["preCypherResult"],
        "error": state["error"]
    }




# Post-processing adapts any output format downstream, which keeps field
# handling out of the cypher-generation step.
# Graph node function.
async def generate_cypher(state: CypherState):
    """Generate a Cypher statement for the user question via the LLM.

    Feeds the selected sub-schema, the sampling rationale, the cypher/result
    history and six reference scripts into the generation prompt, then parses
    the model's JSON reply into ``cypher`` and ``reason``.
    """
    inform_insight_thinking("agent_graph", "开始生成cypher语句")

    prompt = _read_file(CYPHER_GENERATE_PROMPT_PATH)

    # Reference scripts used as few-shot examples in the prompt.
    user_car_fam_cypher = _read_file(USER_CAR_FAM_CYPHER_PATH)
    new_client_cypher = _read_file(NEW_CLIENT_CYPHER_PATH)
    group_operation_cypher = _read_file(GROUP_OPERATION_CYPHER_PATH)
    fraud_detection_cypher = _read_file(FRAUD_DETECTION_CYPHER_PATH)
    product_for_group_cypher = _read_file(PRODUCT_FOR_GROUP_CYPHER_PATH)
    group_profile_cypher = _read_file(GROUP_PROFILE_CYPHER_PATH)

    response_format = _read_file(CYPHER_GENERATE_RESPONSE_PATH)

    prompt = prompt.format(
        state["user_question"],
        state["selectSchema"],
        state["sampleReason"],
        state["preCypher"],
        state["preCypherResult"],
        user_car_fam_cypher,
        new_client_cypher,
        group_operation_cypher,
        fraud_detection_cypher,
        product_for_group_cypher,
        group_profile_cypher,
        response_format,
    )

    prompt = _prompt_length_protect(prompt)  # guard against over-long prompts
    cypher_gen_timer = Timer(f"大模型生成cypher语句(prompt长度{len(prompt)})")
    cypher_gen_timer.start()
    response = await llm.ainvoke(prompt)
    data = response.content
    duration = cypher_gen_timer.end()
    print(f"生成速度(结果长度{len(data)})：{len(data) / duration} token/s")
    # NOTE(review): strips every 'json' substring, not only a ```json fence —
    # replies whose content contains the word "json" get mangled.
    data = data.replace('json', '')
    data = data.replace('```', '')
    datajson = json.loads(data)
    state["cypher"] = datajson['cypher']  # assumed to be a string in the reply
    state["reason"] = datajson['reason']  # assumed to be a string in the reply
    print(f"前置信息分析：{datajson['preInfo']}")
    inform_insight_thinking("agent_graph", f"{datajson['reason']}")
    print(f"生成的cypher语句：{datajson['cypher']}")

    return {"cypher": state["cypher"], "reason": state["reason"]}


async def query_neo4j(state: CypherState):
    """Execute the generated Cypher and store the parsed graph in state.

    Failures never raise: the error string is recorded in ``error`` and
    appended to ``preCypherResult`` so the conditional edge can route to
    the fixing step.
    """
    inform_insight_thinking("agent_graph", "正在查询图数据库")
    query_timer = Timer("查询图数据库")
    query_timer.start()
    cypher = state['cypher']
    try:
        # The statement is logged into history before execution, so failed
        # attempts also show up in preCypher.
        state["preCypher"].append(cypher)
        print(cypher)
        records, summary, keys = neo4j_driver.execute(cypher)
        parsed = graph_parsing.parse_graph(records, keys)
        state["query_result"] = parsed
        state["preCypherResult"].append(parsed)
        state["error"] = None
        inform_insight_thinking(
            "agent_graph",
            f"查询到的图数据：{len(parsed[0])}个点，{len(parsed[1])}条边")
    except Exception as e:
        state["query_result"] = None
        state["error"] = str(e)
        state["preCypherResult"].append(str(e))
        print(f"查询发生报错：\n{e}")
        inform_insight_thinking("agent_graph", f"查询发生报错")
    query_timer.end()
    return {
        "query_result": state["query_result"],
        "preCypher": state["preCypher"],
        "preCypherResult": state["preCypherResult"],
        "error": state["error"]
    }


def check_success(state: CypherState):
    """Return True when the Neo4j query succeeded and produced a result.

    Success means no recorded error and a truthy ``query_result``.
    """
    inform_insight_thinking("agent_graph", "开始检查是否成功 ")
    inform_insight_thinking("agent_graph", f'error: {state["error"]} ')
    inform_insight_thinking("agent_graph", f'query_result: {state["query_result"]}')
    # bool() normalises the result: previously the truthy query_result object
    # itself could be returned, which breaks callers comparing against True.
    return state["error"] is None and bool(state["query_result"])


CYPHER_TUNE_PROMPT_PATH = f"{base_path}/prompt/cypher_tune_prompt.txt"


# done: write better fixing prompt, return in parsable format
# todo: memory fixing techs ?
async def fix_cypher(state: CypherState):
    """Ask the LLM to repair the failed Cypher using the recorded error.

    Increments ``retry_count``, clears ``error`` and overwrites ``cypher``
    with the model's ``fixedCypher``.
    """
    state["retry_count"] += 1
    inform_insight_thinking("agent_graph", f"开始修复cypher语句, 第{state["retry_count"]}次")
    inform_insight_thinking("agent_graph", f"重试次数：{state["retry_count"]}")

    prompt = _read_file(CYPHER_TUNE_PROMPT_PATH)
    prompt = prompt.format(
        state['user_question'],
        state['reason'],
        state['cypher'],
        state['error'],
    )
    # Un-escape literal braces the template doubled to survive .format().
    prompt = prompt.replace("{{", "{").replace("}}", "}")
    cypher_fix_timer = Timer(f"修复cypher语句(prompt长度{len(prompt)})")
    cypher_fix_timer.start()
    response = await llm.ainvoke(prompt)
    data = response.content
    duration = cypher_fix_timer.end()
    print(f"生成速度(结果长度{len(data)})：{len(data) / duration} token/s")
    # NOTE(review): removes every 'json' substring, not only a ```json fence.
    data = data.replace('json', '')
    data = data.replace('```', '')
    datajson = json.loads(data)
    # NOTE(review): "/n" below looks like a typo for "\n" — confirm intent.
    print(f"修复后的cypher: /n{datajson["fixedCypher"]}".replace("\n", ""))
    inform_insight_thinking("agent_graph", f"错误分析思路: /n{datajson["errorReason"]}")
    state["cypher"] = datajson["fixedCypher"]
    state["error"] = None
    return {"cypher": state["cypher"], "retry_count": state["retry_count"], "error": state["error"]}


async def interpret_result(state: CypherState):
    """Have the LLM summarise the final query result (≤300 Chinese chars).

    Builds an interpretation prompt from the execution history, optional
    profile/stats payloads, and the (possibly truncated) vertex/edge lists.
    """
    # done: transform graphData for interpreting
    # Assumes query_result unpacks as (vertex_set, edge_set, other) — the
    # three-element shape produced by graph_parsing.parse_graph.
    vertex_set, edge_set, other = state['query_result']
    # done: better interpreting system prompt
    # NOTE(review): ``report`` is assembled but never used after this block —
    # looks like leftover debugging output; confirm before removing.
    report = ("text2cypher完整执行报告\n"
              f"用户问题：{state['user_question']}\n\n"
              f"采样逻辑：{state['sampleReason']}\n\n"
              f"查询逻辑：{state['reason']}\n\n"
              f"重试次数：{state['retry_count']}\n\n"
              f"查询语句：{state['preCypher']}\n\n"
              )
    if _is_empty(state):
        # NOTE(review): prepare_cypher never appears to store 'emptyReason'
        # into state, so this lookup may fail on an empty result — verify.
        report += f"数据缺失原因：{state['emptyReason']}"
    prompt = ("你是中国移动自然人洞察平台助手的一个智能助手，一个完整的工作流是用户提出问题，系统采样数据，"
              "系统分析思路并生成cypher语句，从neo4j查询数据，解读数据。你的工作是负责最后一步的解读，不超过300字。"
              "假如数据为空，则对查不到数据进行原因分析"
              f"用户问题：{state['user_question']}\n\n"
              f"采样逻辑：{state['sampleReason']}\n\n"
              f"查询逻辑：{state['reason']}\n\n"
              f"重试次数：{state['retry_count']}\n\n"
              f"查询语句：{state['preCypher']}\n\n")

    # done: stats and profile add to interpretation
    if "profile" in other or "stats" in other:
        prompt += "\n画像与统计信息：\n"
        if "profile" in other:
            prompt += f"\nprofile: {other["profile"]}\n"
        if "stats" in other:
            prompt += f"\nstats: {other["stats"]}"

    # Data compression: plain truncation only.
    vertex_list, edge_list = simplify_for_explain(vertex_set, edge_set)  # to lists, dropping duplicate info
    if len(str(vertex_list) + str(edge_list)) > 130000:
        print("数据长度超过130000，开始单纯截断数据")
        sampled_vertex_list = _sample_result(vertex_list, 100)  # at most 100 vertices
        sampled_edge_list = _sample_result(edge_list, 300)  # at most 300 edges
        prompt += f"查询到的数据是{(sampled_vertex_list, sampled_edge_list, other)}"
    else:
        prompt += f"查询到的数据是{(vertex_list, edge_list, other)}"

    prompt = _prompt_length_protect(prompt)  # prompt length guard
    inform_insight_thinking("agent_graph", f"正在解读{len(prompt)}个字符长度的数据\n")
    interpretation_timer = Timer(f"数据解读(prompt长度{len(prompt)})")
    interpretation_timer.start()
    response = await llm.ainvoke(prompt)
    data = response.content
    duration = interpretation_timer.end()
    print(f"生成速度(结果长度{len(data)})：{len(data) / duration} token/s")
    print(f"解读结果：{data}")

    return {"interpretation": data}

##### multi-thread part ######

async def generate_cypher_parallel(state: CypherState):
    """Generate one Cypher candidate (no logging/timing; parallel path).

    Same prompt assembly as generate_cypher, but returns the parsed
    ``cypher``/``reason`` without mutating state.
    """
    template = _read_file(CYPHER_GENERATE_PROMPT_PATH)

    # Reference scripts, in the positional order the template expects.
    example_cyphers = [
        _read_file(p)
        for p in (
            USER_CAR_FAM_CYPHER_PATH,
            NEW_CLIENT_CYPHER_PATH,
            GROUP_OPERATION_CYPHER_PATH,
            FRAUD_DETECTION_CYPHER_PATH,
            PRODUCT_FOR_GROUP_CYPHER_PATH,
            GROUP_PROFILE_CYPHER_PATH,
        )
    ]
    response_format = _read_file(CYPHER_GENERATE_RESPONSE_PATH)

    prompt = template.format(
        state["user_question"],
        state["selectSchema"],
        state["sampleReason"],
        state["preCypher"],
        state["preCypherResult"],
        *example_cyphers,
        response_format,
    )
    prompt = _prompt_length_protect(prompt)  # length guard

    response = await llm.ainvoke(prompt)
    raw = response.content.replace('json', '').replace('```', '')
    parsed = json.loads(raw)

    # Both fields are assumed to be strings in the model reply.
    return {"cypher": parsed['cypher'], "reason": parsed['reason']}

async def query_neo4j_parallel(cypher):
    """Execute one Cypher statement; errors are returned, never raised.

    Returns a dict with the parsed result (or None), the statement itself,
    the history entry and the error string (or None).
    """
    query_timer = Timer("查询图数据库")
    query_timer.start()
    try:
        records, summary, keys = neo4j_driver.execute(cypher)
        parsed = graph_parsing.parse_graph(records, keys)
        query_result, error, history_entry = parsed, None, parsed
        inform_insight_thinking("agent_graph",
                                f"查询到的图数据：{len(parsed[0])}个点，{len(parsed[1])}条边")
    except Exception as e:
        query_result, error, history_entry = None, str(e), str(e)
        print(f"查询发生报错：\n{e}")
    query_timer.end()
    return {
        "query_result": query_result,
        "preCypher": cypher,
        "preCypherResult": history_entry,
        "error": error
    }


async def generate_cypher_unit_task(state: CypherState, varient_id: int):
    """One parallel variant: generate a Cypher candidate, then execute it."""
    print(f"{varient_id}:开始生成cypher语句")
    gen_timer = Timer(f"{varient_id}:开始生成cypher语句")
    gen_timer.start()
    generated = await generate_cypher_parallel(state)
    print(f"生成语句：/r/n{generated['cypher']}")
    gen_timer.end()

    # Debug: every variant shares the very same state object.
    print(f"==============varient_id:{varient_id}=============")
    print(id(state))
    print(f"==================================================")

    print(f"{varient_id}:开始查询图数据库")
    outcome = await query_neo4j_parallel(generated["cypher"])
    # Fold cypher and reason into the query outcome so the caller gets one dict.
    outcome["cypher"] = generated.get("cypher")
    outcome["reason"] = generated.get("reason")
    return outcome

import asyncio
async def parallel_processing(state: CypherState):
    inform_insight_thinking("agent_graph", "开始并行处理")

    # 状态初始化
    state["error"] = None
    # 并发启动多个变体任务
    tasks = [asyncio.create_task(generate_cypher_unit_task(state, i)) for i in range(state["parallel_count"])]
    try:
        # 等待所有任务完成
        results = await asyncio.gather(*tasks, return_exceptions=True)
        
        # 检查所有结果，选择最佳结果
        for i, result in enumerate(results):
                
            # 检查标准1: 查询neo4j过程没有报错
            if result and result.get("error") is None:
                query_result = result.get("query_result")
                if query_result:
                    
                    state["query_result"] = query_result
                    state["preCypherResult"].append(result.get("preCypherResult"))
                    state["preCypher"].append(result.get("preCypher"))
                    # 检查标准2: 查询结果的点和边个数不为0
                    if not _is_empty(state):
                        
                        state["cypher"] = result.get("cypher")
                        state["reason"] = result.get("reason")
                        state["error"] = None
                        print(f"选择任务 {i} 的结果: 节点数={len(state["query_result"][0])}, 边数={len(state["query_result"][1])}")
                        
                        # 忽略其他结果，返回正确的结果
                        return {
                            "query_result": state["query_result"],
                            "preCypher": state["preCypher"],
                            "preCypherResult": state["preCypherResult"],
                            "error": None,
                            "cypher": state["cypher"],
                            "reason": state["reason"]
                        }
                    else:
                        # 标记存在空结果的情况
                        state["error"] = "EMPTY_RESULT"

            elif result and result.get("error"):
                # 任务失败的情况，如果之前有统计出空结果，则不记录错误，后续直接回退到prepare cypher阶段
                if state["error"] != "EMPTY_RESULT":
                    state["query_result"] = None
                    if state["error"] is not None:
                        state["error"].append(result.get("error"))
                    else:
                        state["error"] = [result.get("error")]
              
        return {
            "query_result": state["query_result"], 
            "preCypher": state["preCypher"], 
            "preCypherResult": state["preCypherResult"], 
            "error": state["error"]
            }
    
    # return之前保证所有任务结束
    finally:
        # 双重保险，确保没有遗留任务
        for t in tasks:
            if not t.done():
                t.cancel()
                try:
                    await t
                except asyncio.CancelledError:
                    pass

async def fix_cypher_unit_task(state: CypherState, varient_id: int):
    """Repair variant ``varient_id``'s Cypher via the LLM and re-run it.

    NOTE(review): every variant mutates the SAME shared ``state`` object —
    ``state["cypher"]``/``state["error"]`` are overwritten by each task, so
    concurrent fixes can clobber one another. Also assumes ``state['error']``
    is a list indexable by variant id — verify against the caller.
    """
    prompt = _read_file(CYPHER_TUNE_PROMPT_PATH)
    prompt = prompt.format(
        state['user_question'],
        state['reason'],
        state['cypher'],
        state['error'][varient_id],
    )
    # Un-escape literal braces the template doubled to survive .format().
    prompt = prompt.replace("{{", "{").replace("}}", "}")
    response = await llm.ainvoke(prompt)
    data = response.content
    # NOTE(review): removes every 'json' substring, not only a ```json fence.
    data = data.replace('json', '')
    data = data.replace('```', '')
    datajson = json.loads(data)
    inform_insight_thinking("agent_graph", f"错误分析思路: /n{datajson["errorReason"]}")
    state["cypher"] = datajson["fixedCypher"]
    state["error"] = None
    query_result = await query_neo4j_parallel(state["cypher"])
    return query_result


# 当并行生成的多个cypher全部报错，执行该函数
async def fix_cypher_parallel(state: CypherState):
    state["retry_count"] += 1
    inform_insight_thinking("agent_graph", f"开始修复cypher语句, 第{state["retry_count"]}次")
    inform_insight_thinking("agent_graph", f"重试次数：{state["retry_count"]}")

    tasks = [asyncio.create_task(fix_cypher_unit_task(state, i)) for i in range(state["parallel_count"])]

    try:
        # 等待所有任务完成
        results = await asyncio.gather(*tasks, return_exceptions=True)
        
        # 检查所有结果，选择最佳结果
        for i, result in enumerate(results):
                
            # 检查标准1: 查询neo4j过程没有报错
            if result and result.get("error") is None:
                query_result = result.get("query_result")
                if query_result:
                    
                    state["query_result"] = query_result
                    state["preCypherResult"].append(result.get("preCypherResult"))
                    state["preCypher"].append(result.get("preCypher"))
                    # 检查标准2: 查询结果的点和边个数不为0
                    if not _is_empty(state):
                        
                        # state["cypher"] = result.get("cypher")
                        # state["reason"] = result.get("reason")
                        state["error"] = None
                        print(f"选择任务 {i} 的结果: 节点数={len(state["query_result"][0])}, 边数={len(state["query_result"][1])}")
                        
                        # 忽略其他结果，返回正确的结果
                        return {
                            "query_result": state["query_result"],
                            "preCypher": state["preCypher"],
                            "preCypherResult": state["preCypherResult"],
                            "error": None,
                            "cypher": state["cypher"],
                            "reason": state["reason"]
                        }
                    else:
                        # 标记存在空结果的情况
                        state["error"] = "EMPTY_RESULT"

            elif result and result.get("error"):
                # 任务失败的情况，如果之前有统计出空结果，则不记录错误，后续直接回退到prepare cypher阶段
                if state["error"] != "EMPTY_RESULT":
                    state["query_result"] = None
                    if isinstance(state.get("error"), list):
                        state["error"].append(result.get("error"))
                    else:
                        state["error"] = [result.get("error")]
              
        return {
            "query_result": state["query_result"], 
            "preCypher": state["preCypher"], 
            "preCypherResult": state["preCypherResult"], 
            "error": state["error"],
            "retry_count": state["retry_count"]
            }
    finally:
        # 双重保险，确保没有遗留任务
        for t in tasks:
            if not t.done():
                t.cancel()
                try:
                    await t
                except asyncio.CancelledError:
                    pass

    # return {"cypher": state["cypher"], "retry_count": state["retry_count"], "error": state["error"]}

""" condition edge """
# Conditional edge: pick the next node based on the state after a parallel query.
def after_query_decide_next_parallel(state: CypherState):
    """Route after the parallel query step: interpret, retry, fix, or stop."""
    under_retry_limit = state["retry_count"] < state.get("max_retries")
    if state["error"] is None:
        # Empty-but-retryable results loop back to preparation; everything
        # else proceeds to interpretation.
        return "PrepareCypher" if _is_empty(state) and under_retry_limit else "InterpretResult"
    return "FixCypherParallel" if under_retry_limit else END


# Conditional edge: pick the next node based on the state after a serial query.
def after_query_decide_next(state: CypherState):
    """Route after the serial query step: interpret, retry, fix, or stop."""
    under_retry_limit = state["retry_count"] < state.get("max_retries")
    if state["error"] is None:
        # Empty-but-retryable results loop back to preparation; everything
        # else proceeds to interpretation.
        return "PrepareCypher" if _is_empty(state) and under_retry_limit else "InterpretResult"
    return "FixCypher" if under_retry_limit else END


def after_prepare_decide_next(state: CypherState):
    """Route after preparation: canned script, free-form sampling, or stop."""
    scenario = state["scenario"]
    if scenario == 0:
        # Free-form flow: sample the database first.
        return "SampleNeo4j"
    if scenario in range(1, 13):
        # Recognised business scenario: use the matching canned script.
        return "DirectSearch"
    return END


""" HELPERS """


def _read_file(path):
    return open(path, "r", encoding="utf-8").read()


def _prompt_length_protect(prompt):
    if len(prompt) > 130000:
        return prompt[:130000] + "...\n由于长度限制，后面数据省略"
    return prompt


def _is_empty(state: CypherState):
    if not state["query_result"]:
        return False
    return state["query_result"] and len(state["query_result"][0]) == 0 and len(state["query_result"][1]) == 0


def _generate_llm():
    """Build the ChatOpenAI client from environment variables.

    Deterministic output (temperature 0) with up to five automatic retries.
    """
    settings = {
        "temperature": 0,
        "model": os.getenv("MODEL"),
        "base_url": os.getenv("BASE_URL"),
        "api_key": os.getenv("LLM_API_KEY"),
        "max_retries": 5,
    }
    return ChatOpenAI(**settings)


llm = _generate_llm()


def _sample_result(original_list: dict, size):
    import random
    size = min(len(original_list), size)
    sampled_indices = random.sample(list(range(len(original_list))), size)
    # 用抽取的键构建新字典
    sampled_list = {k: original_list[k] for k in sampled_indices}
    return sampled_list


def _transform_graph_data(records):
    """Assemble raw Neo4j records into the frontend (vertex, edge) format.

    Assumes each record's data exposes ``graphData`` containing ``nodes``
    and ``relationships`` lists (two-hop neighbourhood queries) — verify
    against the producing Cypher.
    """
    graph_service = GraphService()
    subgraphs = list(record.data() for record in records)
    # Post-process the two-hop neighbourhood data into the return format.
    vertex_set = {}
    edge_set = {}
    for subgraph in subgraphs:
        for node in subgraph["graphData"]["nodes"]:
            graph_service._add_node_base(vertex_set, node)
            # Node colour intentionally unset; the frontend shows its default.
            # node["color"] = "#FFC0CB"

        for rel in subgraph["graphData"]["relationships"]:
            graph_service._add_edge_base(edge_set, rel)
    return graph_service._convert_ids(vertex_set, edge_set)
