import logging
import os
import sys

from venv import logger  # NOTE(review): IDE auto-import artifact; prefer logging.getLogger(__name__)

# Add the directory containing this file to the Python path
current_dir = os.path.dirname(os.path.abspath(__file__))
if current_dir not in sys.path:
    sys.path.insert(0, current_dir)

from typing import TypedDict, Dict, List, Annotated, Union, Literal
from langgraph.constants import START, END
from langgraph.graph import add_messages, StateGraph
import json
from EnvironmentService import EnvironmentService
from langchain_core.messages import HumanMessage, BaseMessage, ToolMessage
import re
from RetrieverTools import (find_class, find_method_definition, find_variable_definition, find_method_calls,
                            find_method_usages, fuzzy_search, search_similarity_test_class, graph_retriever,)
                            # human_assistance)
from PromptTemplate import Generator_Init_State_Template, Execution_Review_Template, \
    Generator_Init_State_with_Function_Information_Template, Generator_Update_Init_State_Template, \
    Generator_Update_Init_State_with_Function_Information_Template
from langgraph.pregel import RetryPolicy
from langchain.tools.render import render_text_description
from Config import *
from Utils import extract_code, extract_json

from langchain.agents import create_openai_functions_agent, AgentExecutor
from langchain.prompts import ChatPromptTemplate

# Tool definitions: the base retrieval toolbox, optionally extended with the
# similar-test-class search when retrieving existing test cases is allowed
# (flag `Limit_Retrieve_Test_Case` comes from Config).
tools = [
    find_class,
    find_method_definition,
    find_variable_definition,
    find_method_calls,
    find_method_usages,
    fuzzy_search,
    # human_assistance
]
if not Limit_Retrieve_Test_Case:
    tools = tools + [search_similarity_test_class]
# tool_node = ToolNode(tools)

# LLM initialization; `llm` and the feature flags are defined in `Config.py`.
if Enable_Native_Function_Call:
    # The model supports native function calling: bind the toolbox directly.
    llm_with_tools = llm.bind_tools(tools)
else:
    # No native function calling: keep the bare model and render a textual
    # description of the tools that is injected into the prompt instead.
    llm_with_tools = llm
    rendered_tools = render_text_description(tools)


# State definition
# State definition
class GeneratorState(TypedDict):
    """Shared state threaded through every node of the generator graph.

    Filled incrementally: `init` seeds the prompt and counters, the
    generation/tool nodes append to `messages`, and the compile / execute /
    report nodes populate the result fields.
    """
    envServer: EnvironmentService  # Environment service (compile/run/coverage/mutation, CKG access)
    feedback_times: int  # Number of feedback iterations so far
    max_feedback_times: int  # Maximum number of feedback iterations
    find_bug: bool  # Whether the execution review blamed the method under test
    package_name: str  # Package name
    method_code: str  # Code of the method under test
    method_signature: str  # Signature of the method under test
    class_name: str  # Name of the containing class
    full_method_name: str  # Fully qualified method name
    start_line: int  # Start line of the method under test
    end_line: int  # End line of the method under test
    method_summary: str  # Summary of the method's main functionality
    requirement: Dict  # Generated requirement list (test specification)
    test_case: str  # Generated test case source ("" = generate from scratch)
    test_class_name: str  # Test class name extracted from the test case
    compile_result: bool  # Compilation result
    execute_result: bool  # Execution result
    test_result: str  # "Success" / "Syntax Error" / "Compile Error" / "Execute Error"
    test_report: str  # Raw output of the last compile/execute run
    coverage_report: str  # Coverage report (dict with "result"/"output")
    mutation_report: str  # Mutation test report (dict with "result"/"output")
    # Interaction history (for LLM processing); add_messages merges appends
    messages: Annotated[List[Union[dict, BaseMessage]], add_messages]
    log_message: list  # Log messages (discarded/invalid model turns)
    generate_or_evaluation: Literal["generation", "evaluation"]  # Generation or evaluation mode


def generator_init_state(state: GeneratorState):
    """
    **State Node**: Initialization

    Builds the initial prompt: a "generate from scratch" prompt when no prior
    test case exists (state["test_case"] == ""), otherwise an "update existing
    test case" prompt. Resets the per-run bookkeeping fields (feedback
    counters, compile/execute flags, log).

    Fix: the original did `matches[0]` unguarded and raised IndexError when
    the prior test case contained no `public class` declaration; we now fall
    back to an empty test class name.
    """
    requirement = state["requirement"]
    # A stale test case may be nested in the requirement dict; it must not
    # leak into the specification shown to the model.
    requirement.pop("test_case", None)
    test_class_name = ""
    # Fields shared by both prompt variants.
    prompt_information = {
        "class_name": state["class_name"],
        "package_name": state["package_name"],
        "method_signature": state["method_signature"],
        "method_summary": state["method_summary"],
        "test_specification": json.dumps(requirement, indent=4),
    }
    if state["test_case"] == "":
        if Enable_Native_Function_Call:
            init_prompt = Generator_Init_State_Template.invoke(prompt_information)
        else:
            # If the LLM does not support native function calls, describe the
            # tools inside the system prompt instead.
            prompt_information["rendered_tools"] = rendered_tools
            init_prompt = Generator_Init_State_with_Function_Information_Template.invoke(prompt_information)
    else:
        # Recover the test class name from the existing test case.
        class_pattern = r'public class\s+(\w+)\s*(?:\{|\s+extends|\s+implements|$)'
        matches = re.findall(class_pattern, state["test_case"])
        if matches:
            test_class_name = matches[0]
        prompt_information["test_case"] = state["test_case"]
        if Enable_Native_Function_Call:
            init_prompt = Generator_Update_Init_State_Template.invoke(prompt_information)
        else:
            # Same fallback for models without native function calls.
            prompt_information["rendered_tools"] = rendered_tools
            init_prompt = Generator_Update_Init_State_with_Function_Information_Template.invoke(prompt_information)
    valid_prompt = init_prompt.to_messages()
    return {"messages": valid_prompt, "feedback_times": 0, "max_feedback_times": 3, "find_bug": False,
            "log_message": [], "compile_result": False, "execute_result": False, "test_case": None,
            "test_class_name": test_class_name}


def testMethodGenerator(state: GeneratorState):
    """
    **State Node**: Test Case Generation
    The core state of the Executor state machine. Invokes the model on the
    current message flow and validates the returned tool requests; invalid
    requests (unknown tool names, malformed JSON tool calls) are fed back to
    the model up to `max_try_times` times.

    Fixes over the original:
    - Native branch: after re-invoking the model, the old code kept iterating
      the *stale* `result.tool_calls` list; we now re-validate the fresh
      result on each pass of the retry loop.
    - Non-native branch: `check_result = True` was set unconditionally after
      the name-check loop, so unknown tool names never triggered feedback;
      the check is now correct.
    """
    result = llm_with_tools.invoke(state["messages"])
    tools_by_name = {tool.name: tool for tool in tools}
    state["messages"].append(result)
    log = []
    try_times = 0
    max_try_times = 3
    while try_times < max_try_times:
        if Enable_Native_Function_Call and result.tool_calls:
            unknown = [tc["name"] for tc in result.tool_calls if tc["name"] not in tools_by_name]
            if not unknown:
                break
            # The model asked for a tool that does not exist: tell it and retry.
            feedback = HumanMessage(
                f"Tool {unknown[0]} not found. please check the tool name and try again."
                f"Available tools are: {', '.join(tools_by_name.keys())}"
            )
            state["messages"].append(feedback)
            log.append(result)
            log.append(feedback)
            result = llm_with_tools.invoke(state["messages"])
            state["messages"].append(result)
            try_times += 1
        elif not Enable_Native_Function_Call:
            expected_schema = {"name": str, "arguments": dict}
            ret, intention = extract_json(result.content, expected_schema)
            # Valid only when every requested tool name is known.
            check_result = isinstance(ret, list) and all(r["name"] in tools_by_name for r in ret)
            if not (intention and not check_result):
                break
            feedback = HumanMessage(
                "If you want to use tools, please specify the tool name and arguments in the JSON format. "
                "For example, ```json\n{'name': 'find_class', 'arguments': {'class_name': 'Test'}}\n```"
                "Available tools are: " + ', '.join(tools_by_name.keys()) + "."
                                                                            "Please try again./no_think"
            )
            state["messages"].append(feedback)
            log.append(result)
            log.append(feedback)
            result = llm_with_tools.invoke(state["messages"])
            state["messages"].append(result)
            try_times += 1
        else:
            break

    return {"messages": [result], "log_message": log}


def call_tool(state: GeneratorState):
    """
    **State Node**: Tool Invocation
    Executes the tool calls requested by the model. Results are returned as
    ToolMessage objects (native function calls) or plain HumanMessage text
    (prompt-based tool calls). Tool failures are reported back to the model
    instead of crashing the graph.

    Fix: the "too many searches" nudge used to be appended inside the
    per-tool-call loop, producing one duplicate per tool call in a
    multi-tool turn; it is now appended at most once per turn.
    """
    tools_by_name = {tool.name: tool for tool in tools}
    messages = state["messages"]
    last_message = messages[-1]
    output_messages = []
    if Enable_Native_Function_Call:
        for tool_call in last_message.tool_calls:
            try:
                if tool_call["name"] == "search_similarity_test_class":
                    # This tool takes the current test class name, not the
                    # model-provided arguments.
                    tool_result = tools_by_name[tool_call["name"]].invoke(state["test_class_name"])
                else:
                    tool_result = tools_by_name[tool_call["name"]].invoke(tool_call["args"])
                output_messages.append(
                    ToolMessage(
                        content=json.dumps(tool_result),
                        name=tool_call["name"],
                        tool_call_id=tool_call["id"],
                    )
                )
            except Exception as e:
                # Return the error if the tool call fails
                output_messages.append(
                    ToolMessage(
                        content=f"error: {e}, do not retry this tool call",
                        name=tool_call["name"],
                        tool_call_id=tool_call["id"]
                    )
                )
        if len(messages) > 40:
            # Long conversations mean the model keeps searching; nudge it to
            # produce the test case (once per turn).
            output_messages.append(
                HumanMessage(
                    "You have searched too many times, please generate test case directly. /no_think"
                )
            )
    else:
        expected_schema = {"name": str, "arguments": dict}
        results, intention = extract_json(last_message.content, expected_schema)
        assert results, "Tool call not found. Generally it is impossible to reach here."
        for tool_call in results:
            try:
                if tool_call["name"] == "search_similarity_test_class":
                    # Same special case as the native branch.
                    tool_result = tools_by_name[tool_call["name"]].invoke(state["test_class_name"])
                else:
                    tool_result = tools_by_name[tool_call["name"]].invoke(tool_call["arguments"])
                output_messages.append(
                    HumanMessage(
                        f"Tool call: {tool_call['name']}, arguments: {tool_call['arguments']}, result: {json.dumps(tool_result)}"
                    )
                )
            except Exception as e:
                # Return the error if the tool call fails
                output_messages.append(
                    HumanMessage(
                        f"Tool call: {tool_call['name']}, arguments: {tool_call['arguments']}, error: {e}"
                    )
                )
    return {"messages": output_messages}


def generatorChecker(state: GeneratorState):
    """
    **Branch Decision**
    Routes the output of the 'Test Case Generation' node: if the result
    carries a tool request (native tool_calls, or a parseable JSON tool call
    in prompt mode), go to the tool-invocation node; otherwise go to the
    code-extraction node.
    """
    last_message = state["messages"][-1]
    if Enable_Native_Function_Call:
        wants_tools = bool(last_message.tool_calls)
    else:
        parsed, _ = extract_json(last_message.content, {"name": str, "arguments": dict})
        wants_tools = bool(parsed)
    return "tools" if wants_tools else "codeExtractor"

# def generatorChecker(state: GeneratorState):
#     """
#     **Branch Decision**
#     修改：在工具调用前插入人工确认检查
#     """
#     messages = state["messages"]
#     last_message = messages[-1]
#
#     # 检查是否有需要确认的工具调用
#     has_tool_calls = False
#
#     if Enable_Native_Function_Call:
#         if hasattr(last_message, 'tool_calls') and last_message.tool_calls:
#             # 检查是否有非human_assistance的工具调用
#             for tool_call in last_message.tool_calls:
#                 if tool_call["name"] != "human_assistance":
#                     has_tool_calls = True
#                     break
#     else:
#         expected_schema = {"name": str, "arguments": dict}
#         results, intention = extract_json(last_message.content, expected_schema)
#         if results:
#             for tool_call in results:
#                 if tool_call["name"] != "human_assistance":
#                     has_tool_calls = True
#                     break
#
#     if has_tool_calls:
#         return "human_confirmation"  # 先进行人工确认
#     elif (Enable_Native_Function_Call and hasattr(last_message, 'tool_calls') and last_message.tool_calls) or \
#             (not Enable_Native_Function_Call and extract_json(last_message.content, {"name": str, "arguments": dict})[
#                 0]):
#         return "tools"  # 只有human_assistance工具调用，直接执行
#     else:
#         return "codeExtractor"
#
#
# def human_confirmation_node(state: GeneratorState):
#     """
#     人工确认节点：处理工具调用前的人工确认
#     """
#     messages = state["messages"]
#     last_message = messages[-1]
#
#     # 检查是否需要人工确认的工具调用
#     tool_calls_to_confirm = []
#
#     if Enable_Native_Function_Call and hasattr(last_message, 'tool_calls') and last_message.tool_calls:
#         for tool_call in last_message.tool_calls:
#             if tool_call["name"] != "human_assistance":  # 排除人工确认工具本身
#                 tool_calls_to_confirm.append(tool_call)
#
#     elif not Enable_Native_Function_Call:
#         expected_schema = {"name": str, "arguments": dict}
#         results, intention = extract_json(last_message.content, expected_schema)
#         if results:
#             for tool_call in results:
#                 if tool_call["name"] != "human_assistance":
#                     tool_calls_to_confirm.append(tool_call)
#
#     # 如果没有需要确认的工具调用，直接返回
#     if not tool_calls_to_confirm:
#         return {"messages": []}
#
#     # 准备确认信息
#     if len(tool_calls_to_confirm) == 1:
#         tool_call = tool_calls_to_confirm[0]
#         query = f"是否允许调用工具 '{tool_call['name']}'？参数: {tool_call.get('args', tool_call.get('arguments', {}))}"
#         tool_name = tool_call['name']
#         tool_args = tool_call.get('args', tool_call.get('arguments', {}))
#     else:
#         tool_names = [tc['name'] for tc in tool_calls_to_confirm]
#         query = f"是否允许调用以下工具: {', '.join(tool_names)}？"
#         tool_name = "多个工具"
#         tool_args = {}
#
#     # 调用人工确认工具（这会触发interrupt）
#     confirmation_result = human_assistance.invoke({
#         "query": query,
#         "current_state": "测试用例生成阶段",
#         "tool_name": tool_name,
#         "tool_args": tool_args
#     })
#
#     # 处理确认结果
#     if "取消" in confirmation_result or "否" in confirmation_result or "不" in confirmation_result:
#         # 用户取消工具调用
#         cancel_message = f"用户取消了工具调用。原计划调用的工具: {', '.join([tc['name'] for tc in tool_calls_to_confirm])}"
#         return {"messages": [HumanMessage(content=cancel_message)]}
#     else:
#         # 用户确认，继续执行
#         confirm_message = f"用户确认工具调用: {confirmation_result}"
#         return {"messages": [HumanMessage(content=confirm_message)]}
#
#
# def humanConfirmationChecker(state: GeneratorState):
#     """
#     **Branch Decision**
#     根据人工确认结果决定下一步
#     """
#     messages = state["messages"]
#     if messages and messages[-1].content and "no" in messages[-1].content:
#         # 用户取消了工具调用，返回重新生成
#         return "testMethodGenerator"
#     else:
#         # 用户确认或没有需要确认的工具，继续执行工具调用
#         return "tools"


def codeExtractor(state: GeneratorState):
    """
    **State Node**: Code Extraction
    Extracts code from the dialogue flow and checks that it is runnable: it
    must contain import statements, a class definition, and a `public class`
    declaration from which the test class name can be read.

    Fix: the original did `matches[0]` unguarded and raised IndexError
    (crashing the graph) when no `public class` declaration was present; we
    now return corrective feedback so the generation loop can recover.
    """
    envServer = state["envServer"]
    last_message = state["messages"][-1]
    test_case = extract_code(last_message.content)
    if not test_case:
        # No code block at all: ask the model to produce one.
        return {"messages": [HumanMessage(
            "Have you fully understood the tested method along with its contextual dependencies? \n"
            "If yes, please proceed to generate the JUnit test case in a Markdown code block (```java ```). \n"
            "If not, continue analyzing and searching for relevant context and dependencies until you are ready./no_think"
        )]}
    if "import" not in test_case or "class" not in test_case:
        return {"messages": [HumanMessage(
            "The test case is not runnable. Make sure it includes the necessary package, import sentence and class definitions./no_think")]}
    class_pattern = r'public class\s+(\w+)\s*(?:\{|\s+extends|\s+implements|$)'
    matches = re.findall(class_pattern, test_case)
    if not matches:
        # A class exists but is not declared public: feed back instead of crashing.
        return {"messages": [HumanMessage(
            "The test case is not runnable. Make sure it declares a public test class./no_think")]}
    return {"test_case": envServer.simple_fix(test_case), "test_class_name": matches[0]}


def extractChecker(state: GeneratorState):
    """
    **Branch Decision**
    After 'Code Extraction': proceed to compilation when a test case was
    successfully extracted, otherwise return to the generator for another
    attempt.
    """
    return "compilation" if state["test_case"] else "testMethodGenerator"


def compilation(state: GeneratorState):
    """
    **State Node**: Compilation
    Compiles the test case via the environment service and records the raw
    result string, the report, and a boolean success flag.
    """
    envServer = state["envServer"]
    assert (state["test_case"] is not None)
    outcome = envServer.run_compile_test(state["test_case"], state["test_class_name"])
    return {
        "test_result": outcome["result"],
        "test_report": str(outcome["output"]),
        "compile_result": outcome["result"] == "Success",
    }


def compilationChecker(state: GeneratorState):
    """
    **Branch Decision**
    After 'Compilation': run the test on success, otherwise enter the
    feedback-iteration loop.
    """
    return "execution" if state["test_result"] == "Success" else "feedbackIteration"


def execution(state: GeneratorState):
    """
    **State Node**: Execution
    Runs the compiled test case via the environment service and records the
    raw result string, the report, and a boolean success flag.
    """
    envServer = state["envServer"]
    assert (state["test_case"] is not None)
    outcome = envServer.run_execute_test(state["test_case"], state["test_class_name"])
    return {
        "test_result": outcome["result"],
        "test_report": str(outcome["output"]),
        "execute_result": outcome["result"] == "Success",
    }


def executionChecker(state: GeneratorState):
    """
    **Branch Decision**
    After 'Execution': go to report generation on success, otherwise have the
    LLM review the failure.
    """
    return "reportGenerator" if state["test_result"] == "Success" else "executionReview"


def feedbackIteration(state: GeneratorState):
    """
    **State Node**: Feedback Iteration
    Turns a failed compile/execute result into a corrective prompt for the
    generator and bumps the feedback counter.

    Refactor: the original repeated three near-identical branches differing
    only in the intro sentence; the shared message tail is now built once.
    For an unrecognized result the original fell through and returned None
    implicitly; that behavior is preserved.
    """
    test_result = state["test_result"]
    test_report = state["test_report"]
    # One intro sentence per failure kind; the rest of the prompt is shared.
    intros = {
        "Syntax Error": "The test case contains syntax errors. Please fix the errors and try again.",
        "Compile Error": "The test case failed to compile. Please fix the errors and try again.",
        "Execute Error": "The test case failed to execute. Please fix the errors and try again.",
    }
    intro = intros.get(test_result)
    if intro is None:
        # Mirrors the original's implicit None for unexpected results.
        return None
    message = (intro
               + "You can use tools to help you find and fix the errors."
               + f"tools: {', '.join([tool.name for tool in tools])}"
               + "The error message is as follows: \n" + test_report + "/no_think")
    return {"feedback_times": state["feedback_times"] + 1,
            "messages": [HumanMessage(message)]}


def feedbackIterationChecker(state: GeneratorState):
    """
    **Branch Decision**
    After 'Feedback Iteration': retry generation while the iteration budget
    lasts, otherwise give up and proceed to report generation (whether or not
    a working test case was produced).
    """
    budget_left = state["feedback_times"] < state["max_feedback_times"]
    return "testMethodGenerator" if budget_left else "reportGenerator"


def executionReview(state: GeneratorState):
    """
    **State Node**: Execution Review
    Reflects on a failed test execution: asks the LLM whether the failure is
    caused by a bug in the method under test or a defect in the generated
    test case. Returns find_bug=True for a method bug, False for a test-case
    problem (also the fallback for unparseable/unknown reviews).

    Fix: the original used `logger` mis-imported from `venv` (an IDE
    auto-import accident), logging under the wrong namespace; use a
    module-named logger instead. Also use review.get("issue") so malformed
    JSON cannot raise KeyError.
    """
    logger = logging.getLogger(__name__)

    def extract_review(content: str):
        # Strip any <think>...</think> reasoning block first.
        content = re.sub(r'<think>.*?</think>', '', content, flags=re.DOTALL).strip()

        # Strategy 1: JSON inside a fenced ```json code block.
        match = re.search(r'```json(.*?)```', content, re.DOTALL)
        if match:
            try:
                return json.loads(match.group(1))
            except json.JSONDecodeError:
                pass

        # Strategy 2: first {...} span without a code fence.
        match = re.search(r'\{.*\}', content, re.DOTALL)
        if match:
            try:
                return json.loads(match.group(0))
            except json.JSONDecodeError:
                pass

        # Strategy 3: the whole content is JSON.
        try:
            return json.loads(content)
        except json.JSONDecodeError:
            pass

        return None

    assert state["test_result"] == "Execute Error"
    execution_report = state["test_report"]
    prompt = Execution_Review_Template.invoke({"execution_report": execution_report,
                                               "test_specification": json.dumps(state["requirement"], indent=4)})
    valid_prompt = prompt.to_messages()
    result = llm.invoke(valid_prompt)

    # Debug log of the raw LLM response.
    logger.info(f"LLM Response in executionReview: {result.content}")

    review = extract_review(result.content)
    if review is None:
        # Unparseable review: fall back to blaming the test case.
        logger.warning("无法解析审查结果，使用默认值: test_case_error")
        review = {"issue": "test_case_error"}

    issue = review.get("issue")  # .get() avoids KeyError on malformed JSON
    if issue == "method_bug":
        find_bug = True
    elif issue == "test_case_error":
        find_bug = False
    else:
        # Unknown issue type: default to a test-case problem.
        logger.warning(f"未知的审查结果类型: {review.get('issue')}，使用默认值: test_case_error")
        find_bug = False

    valid_prompt.append(result)
    return {"messages": valid_prompt, "find_bug": find_bug}
# def executionReview(state: GeneratorState):
#     """
#     **State Node**: Execution Review
#     Reflects on failed test case executions and execution information, calls the LLM to determine whether the issue lies with the method or the test case.
#     Returns the reflection result; find_bug=True indicates a problem with the method, False indicates a problem with the test case.
#     """
#
#     def extract_review(content: str):
#         match = re.search(r'```json(.*?)```', content, re.DOTALL)
#         return json.loads(match.group(1)) if match else None
#
#     assert state["test_result"] == "Execute Error"
#     execution_report = state["test_report"]
#     prompt = Execution_Review_Template.invoke({"execution_report": execution_report,
#                                                "test_specification": json.dumps(state["requirement"], indent=4)})
#     valid_prompt = prompt.to_messages()
#     result = llm.invoke(valid_prompt)
#     review = extract_review(result.content)
#     if review is None:
#         raise ValueError("Invalid review result.")
#     if review["issue"] == "method_bug":
#         find_bug = True
#     elif review["issue"] == "test_case_error":
#         find_bug = False
#     else:
#         find_bug = False
#     valid_prompt.append(result)
#     return {"messages": valid_prompt, "find_bug": find_bug}


def reviewChecker(state: GeneratorState):
    """
    **Branch Decision**
    After 'Execution Review': if a method bug was found the test case is
    considered correct and we proceed to report generation; otherwise the
    test case is at fault and we loop back through feedback iteration.
    """
    return "reportGenerator" if state["find_bug"] else "feedbackIteration"


def reportGenerator(state: GeneratorState):
    """
    **State Node**: Report Generation
    Marks the end of the generation phase; simply forwards the test case into
    the reporting phase.
    """
    return {"test_case": state["test_case"]}


def coverage_report(state: GeneratorState):
    """
    **State Node**: Coverage Report
    Runs coverage testing for the focal method and stores the report. When
    compilation failed, stores a Compile Error placeholder instead.
    """
    envServer = state["envServer"]
    if not state["compile_result"]:
        return {"coverage_report": {"result": "Compile Error", "output": "Compile Error"}}
    # Drop the trailing method segment to get the fully-qualified class name.
    full_class_name = state["full_method_name"].rsplit('.', 1)[0]
    report = envServer.run_coverage_test(
        state["test_case"], state["test_class_name"], state["package_name"],
        full_class_name, state["method_signature"], state["start_line"],
        state["end_line"])
    return {"coverage_report": report}


def mutation_report(state: GeneratorState):
    """
    **State Node**: Mutation Test Report
    Runs mutation testing and stores the report. When execution failed (or
    compilation did), stores an Execute Error placeholder instead.
    """
    envServer = state["envServer"]
    if not state["execute_result"] or state["test_result"] == "Compile Error":
        return {"mutation_report": {"result": "Execute Error", "output": "Execute Error"}}
    # Second-to-last dot-separated segment of the FQ method name is the class.
    class_name = state["full_method_name"].rsplit('.', 2)[-2]
    # parent_class_name = class_name.replace("$", "\$")
    report = envServer.run_mutation_test(state["package_name"], class_name, state["test_case"],
                                         state["test_class_name"],
                                         state["start_line"], state["end_line"])
    return {"mutation_report": report}


def add_testcase_to_CKG(state: GeneratorState):
    """
    **State Node**: Add Test Case to CKG
    Persists the generated test case, together with its execution, coverage
    and mutation metadata, into the code knowledge graph. Skipped in
    evaluation mode.

    Fix: the class-name rename used a blanket str.replace, which corrupted
    any identifier that merely contains the class name (e.g. `FooTestHelper`
    when renaming `FooTest`); a word-boundary regex is used instead.
    """
    if state["generate_or_evaluation"] == "evaluation":
        return {"messages": [HumanMessage("Evaluation mode, no need to add test case to CKG.")]}
    envServer = state["envServer"]
    test_case = state["test_case"]
    # Prefix with a running number so successive same-named test classes
    # never collide inside the CKG.
    test_class_name = "_" + str(envServer.number) + "_" + state["test_class_name"]
    envServer.number += 1
    # Rename only whole-identifier occurrences of the original class name.
    test_case = re.sub(r'\b' + re.escape(state["test_class_name"]) + r'\b',
                       test_class_name, test_case)
    ret = envServer.add_test_to_CKG(test_case, test_class_name)
    if ret["result"] == "Error":
        return {"messages": [HumanMessage("Failed to add test case to CKG.")]}
    focal_clazz_name = state["class_name"]
    focal_method_fq_name = state["full_method_name"]
    # Truncate the FQ method name right after the focal class name to obtain
    # the FQ class name; fall back to the bare class name if not found.
    index = focal_method_fq_name.find(focal_clazz_name)
    if index != -1:
        focal_clazz_fq_name = focal_method_fq_name[:index + len(focal_clazz_name)]
    else:
        focal_clazz_fq_name = focal_clazz_name
    find_bug = state["find_bug"]
    method_signature = state["method_signature"]
    requirement = state["requirement"]
    test_report = state["test_result"]  # "Success" or "Execute Error" or "Compile Error" or "Syntax Error"
    # Coverage / mutation figures default to "0" / empty when the respective
    # run did not succeed.
    coverage_ok = state["coverage_report"]["result"] == "Success"
    mutation_ok = state["mutation_report"]["result"] == "Success"
    coverage_rate = state["coverage_report"]["output"]["line_coverage"] if coverage_ok else "0"
    coverage_lines = state["coverage_report"]["output"]["covered_lines"] if coverage_ok else []
    mutation_score = state["mutation_report"]["output"]["mutation_score"] if mutation_ok else "0"
    mutants = state["mutation_report"]["output"]["filtered_mutations"] if mutation_ok else {}
    graph_retriever.update_test_class(test_class_name, focal_clazz_fq_name, focal_method_fq_name,
                                      method_signature,
                                      test_report, coverage_rate, coverage_lines, mutation_score,
                                      json.dumps(mutants),
                                      find_bug, json.dumps(requirement))
    return {"messages": [HumanMessage("Test case added to CKG successfully.")]}

##########################################################################
###################### Construct the State Graph #########################
##########################################################################


# Build the Executor state machine:
#   init -> testMethodGenerator <-> tools -> codeExtractor -> compilation
#        -> execution -> (executionReview / feedbackIteration loops)
#        -> reportGenerator -> coverageReport -> mutationReport -> CKG -> END
generator_graph = StateGraph(GeneratorState)

generator_graph.add_node("init", generator_init_state)
# LLM-calling nodes retry transient ValueErrors up to 3 times.
generator_graph.add_node("testMethodGenerator", testMethodGenerator,
                         retry=RetryPolicy(max_attempts=3, retry_on=ValueError))
# generator_graph.add_node("human_confirmation", human_confirmation_node)
generator_graph.add_node("tools", call_tool)
generator_graph.add_node("codeExtractor", codeExtractor)
generator_graph.add_node("compilation", compilation)
generator_graph.add_node("execution", execution)
generator_graph.add_node("feedbackIteration", feedbackIteration)
generator_graph.add_node("reportGenerator", reportGenerator)
generator_graph.add_node("coverageReport", coverage_report)
generator_graph.add_node("mutationReport", mutation_report)
generator_graph.add_node("executionReview", executionReview, retry=RetryPolicy(max_attempts=3, retry_on=ValueError))
generator_graph.add_node("addTestCaseToCKG", add_testcase_to_CKG)

generator_graph.add_edge(START, "init")
generator_graph.add_edge("init", "testMethodGenerator")
# Generation either requests tools or yields code.
generator_graph.add_conditional_edges("testMethodGenerator", generatorChecker,
                                      {"tools": "tools", "codeExtractor": "codeExtractor"})
# generator_graph.add_conditional_edges("testMethodGenerator", generatorChecker,
#                                       {"tools": "tools", "codeExtractor": "codeExtractor", "human_confirmation": "human_confirmation"})
# generator_graph.add_conditional_edges("human_confirmation", humanConfirmationChecker,
#                                       {"testMethodGenerator": "testMethodGenerator", "tools": "tools"})
generator_graph.add_edge("tools", "testMethodGenerator")
# Extraction failure loops back to generation; success goes to compile.
generator_graph.add_conditional_edges("codeExtractor", extractChecker,
                                      {"testMethodGenerator": "testMethodGenerator",
                                       "compilation": "compilation"})
generator_graph.add_conditional_edges("compilation", compilationChecker,
                                      {"execution": "execution", "feedbackIteration": "feedbackIteration"})
generator_graph.add_conditional_edges("execution", executionChecker,
                                      {"reportGenerator": "reportGenerator", "executionReview": "executionReview"})
# Feedback loops back to generation until the budget is exhausted.
generator_graph.add_conditional_edges("feedbackIteration", feedbackIterationChecker,
                                      {"testMethodGenerator": "testMethodGenerator",
                                       "reportGenerator": "reportGenerator"})
generator_graph.add_conditional_edges("executionReview", reviewChecker,
                                      {"feedbackIteration": "feedbackIteration", "reportGenerator": "reportGenerator"})
# Linear reporting tail.
generator_graph.add_edge("reportGenerator", "coverageReport")
generator_graph.add_edge("coverageReport", "mutationReport")
generator_graph.add_edge("mutationReport", "addTestCaseToCKG")
generator_graph.add_edge("addTestCaseToCKG", END)

generator_graph = generator_graph.compile()

if __name__ == "__main__":
    # Best-effort: render the compiled state graph to Generator.png.
    try:
        png_bytes = generator_graph.get_graph().draw_mermaid_png()
        with open("Generator.png", "wb") as f:
            f.write(png_bytes)
    except Exception as e:
        print(e)
        print("Failed to generate the image.")
    print("Graph compiled successfully.")
