import json
import yaml
import asyncio
from pocketflow import AsyncNode

# 导入LLM工具
from utils.llm_tools import call_llm, call_llm_to_extract_entities, call_llm_for_judgment, stream_llm, stream_llm_with_thinking

# 导入新的统一工具接口
from utils.tools import (
    is_public_ip,
    query_threat_intelligence,
    query_asset_info,
    query_attack_history,
    analyze_user_agent
)

# 导入内部工具函数
from utils.tools.internal.dga_detector import detect_dga_domain, batch_detect_dga_domains
from utils.tools.internal.weak_credential_detector import check_weak_credentials
from utils.tools.internal.malicious_referrer_detector import is_malicious_referrer
from utils.tools.external.threat_intel import batch_query_threat_intelligence

# 导入剧本库
from utils.playbooks import find_playbooks_for_event

async def _send_node_status(shared, node_name, message_type, data=None):
    """
    辅助函数：向状态队列发送节点状态消息。
    """
    status_queue = shared.get("status_queue")
    if status_queue:
        await status_queue.put({
            "type": message_type,
            "step": node_name,
            "data": data if data is not None else {}
        })

class InitialTriageNode(AsyncNode):
    """
    Node 1: initial triage and entity extraction.

    Calls the LLM to pull structured entities out of the raw alert JSON,
    derives the event type, and looks up a matching response playbook.
    Routes to "unsupported_event" when extraction fails or no playbook
    matches; otherwise to "default".
    """
    async def prep_async(self, shared):
        """Return (raw_alert, shared) so exec_async can reach the status queue."""
        node_name = "InitialTriageNode"
        # await _send_node_status(shared, node_name, "node_start", {"message": "开始初步分类与实体提取"})
        print(f"[Node:{node_name}] prep - 接收原始告警。Shared keys: {list(shared.keys())}")
        # Hand both the raw alert and the shared store to exec_async.
        return shared["raw_alert"], shared

    async def exec_async(self, prep_res_tuple):  # receives the tuple from prep_async
        """Extract entities via the LLM and find a matching playbook.

        Returns a dict with "entities" (the LLM extraction result) and
        "initial_playbook" (the first matching playbook, or None).
        """
        node_name = "InitialTriageNode"
        raw_alert_json_str, shared_for_exec = prep_res_tuple  # unpack

        await _send_node_status(shared_for_exec, node_name, "llm_call", {"message": "正在调用LLM提取实体..."})
        print("[Node:InitialTriageNode] exec - 正在调用LLM提取实体...\n")
        extracted_entities = await call_llm_to_extract_entities(raw_alert_json_str, status_queue=shared_for_exec.get("status_queue"))

        # Use `or` (not dict.get's default) so an explicit None or empty
        # event_type from the LLM falls back instead of crashing .strip().
        event_type = (extracted_entities.get("event_type") or "未知事件").strip()
        print(f"[DEBUG] LLM extracted event_type: '{event_type}' (length: {len(event_type)}, repr: {repr(event_type)})\n")
        await _send_node_status(shared_for_exec, node_name, "entity_extracted", {"event_type": event_type, "entities": extracted_entities})
        print(f"[Node:InitialTriageNode] exec - 提取到事件类型: {event_type}\n")

        matched_playbooks = find_playbooks_for_event(event_type)

        initial_playbook = None
        if matched_playbooks:
            # Only the first matching playbook is used.
            initial_playbook = matched_playbooks[0]
            await _send_node_status(shared_for_exec, node_name, "playbook_found", {"playbook_id": initial_playbook['playbook_id']})
            print(f"[Node:{node_name}] exec - 找到匹配剧本: {initial_playbook['playbook_id']}\n")
        else:
            await _send_node_status(shared_for_exec, node_name, "playbook_not_found", {"event_type": event_type})
            print("[Node:InitialTriageNode] exec - 未找到匹配剧本。\n")

        return {"entities": extracted_entities, "initial_playbook": initial_playbook}

    async def post_async(self, shared, prep_res, exec_res):
        """Persist extraction results to shared and pick the next action."""
        node_name = "InitialTriageNode"
        # exec_res may be None/empty on failure, so never call .get on it
        # unguarded (the previous code raised AttributeError on this path
        # because the f-string re-read exec_res.get after `not exec_res`
        # had already matched).
        if not exec_res or exec_res.get("error") or not exec_res.get("entities"):
            error_detail = (exec_res or {}).get("error", "未能从告警中提取有效实体")
            shared["final_report"] = f"实体提取失败: {error_detail}"
            await _send_node_status(shared, node_name, "error", {"message": shared["final_report"]})
            print(f"[Node:{node_name}] post - 实体提取失败，返回Action: unsupported_event")
            return "unsupported_event"

        shared["working_theory"] = exec_res
        shared["entities"] = exec_res.get("entities", {})
        # Store the public IPs, private IPs and extracted domains from the
        # LLM result so later nodes can read them without re-parsing.
        shared["public_ips"] = exec_res["entities"].get("public_ips", [])
        shared["private_ips"] = exec_res["entities"].get("private_ips", [])
        shared["extracted_domains"] = exec_res["entities"].get("extracted_domains", [])
        await _send_node_status(shared, node_name, "node_end", {"message": "初步分类与实体提取完成", "shared_keys": list(shared.keys())})
        print(f"[Node:{node_name}] post - 更新shared: working_theory, entities, public_ips, private_ips, extracted_domains。Shared keys: {list(shared.keys())}")

        if not exec_res.get("initial_playbook"):
            print(f"[Node:{node_name}] post - 未找到剧本，返回Action: unsupported_event")
            return "unsupported_event"

        print(f"[Node:{node_name}] post - 返回Action: default")
        return "default"

class IntelligenceGatheringNode(AsyncNode):
    """
    Node 2: intelligence gathering.

    Executes every step of the matched playbook's ``plan_steps``,
    dispatching each step's ``tool_name`` through a local tool map and
    collecting per-step results (or skip/error markers) into
    ``shared["gathered_intelligence"]``.
    """
    async def prep_async(self, shared):
        """Validate the playbook and return (playbook, entities, shared)."""
        node_name = "IntelligenceGatheringNode"
        await _send_node_status(shared, node_name, "node_start", {"message": "开始情报收集"})
        print(f"[Node:{node_name}] prep - 准备情报收集。Shared keys: {list(shared.keys())}\n")
        playbook = shared["working_theory"].get("initial_playbook")
        entities = shared["entities"]

        # A playbook without plan_steps is unusable — fail fast.
        if not playbook or not playbook.get("plan_steps"):
            await _send_node_status(shared, node_name, "error", {"message": "缺少有效的剧本或计划步骤。"})
            raise ValueError("IntelligenceGatheringNode: 缺少有效的剧本或计划步骤。")

        return playbook, entities, shared  # return playbook, entities and shared

    async def exec_async(self, prep_res_tuple):
        """Run the playbook's intelligence-gathering steps sequentially.

        Each step maps entity values onto tool parameters via its
        ``parameter_mapping``; steps with unknown tools or missing
        entities are skipped. Batch DGA / batch threat-intel steps have
        dedicated branches, and later steps may read earlier steps'
        results out of ``gathered_intelligence``.

        Returns a dict of step_name -> tool result (or skip/error marker).
        """
        node_name = "IntelligenceGatheringNode"
        playbook, entities, shared_for_exec = prep_res_tuple  # unpack
        gathered_intelligence = {}

        await _send_node_status(shared_for_exec, node_name, "gathering_start", {"playbook_id": playbook['playbook_id']})
        print(f"[Node:{node_name}] exec - 正在执行剧本 {playbook['playbook_id']} 中的情报收集步骤...")

        tool_map = {
            # External tools
            "is_public_ip": is_public_ip,
            "query_threat_intelligence": query_threat_intelligence,
            
            # Internal tools (invoked directly; no complex session management)
            "query_asset_info": query_asset_info,
            "analyze_user_agent": analyze_user_agent,
            "detect_dga_domain": detect_dga_domain,
            "check_weak_credentials": check_weak_credentials,
            "is_malicious_referrer": is_malicious_referrer,
            # Batch tools — handled by dedicated branches in the loop below
            "batch_detect_dga_domains": batch_detect_dga_domains,
            "batch_query_threat_intelligence": batch_query_threat_intelligence,
        }

        # Fetch the connector instance from shared
        siem_connector = shared_for_exec["connectors"]["siem"]

        for step in playbook["plan_steps"]:
            tool_name = step["tool_name"]
            parameter_mapping = step["parameter_mapping"]
            step_name = step["step_name"]
            step_description = step.get("description", "无描述")
            extra_tool_params = step.get("tool_params", {})

            await _send_node_status(shared_for_exec, node_name, "tool_executing", {
                "tool_name": tool_name,
                "step_name": step_name,
                "description": step_description,  # include the step description
                "input_params": {k: entities.get(v) for k, v in parameter_mapping.items()}  # show the entity-mapped parameters
            })
            print(f"  - 正在执行步骤: {step_name} (工具: {tool_name})")

            if tool_name not in tool_map:
                await _send_node_status(shared_for_exec, node_name, "tool_error", {"tool_name": tool_name, "message": "未知的工具，跳过。"})
                print(f"[WARNING] 未知的工具: {tool_name}，跳过此步骤。\n")
                continue

            tool_params = {}  # parameters for this tool invocation
            missing_params = []
            for tool_param_name, entity_key in parameter_mapping.items():
                # Only truthy entity values count; empty/None are treated as missing.
                if entity_key in entities and entities[entity_key]:
                    tool_params[tool_param_name] = entities[entity_key]
                else:
                    missing_params.append(entity_key)

            # Merge the extra parameters declared on the playbook step
            tool_params.update(extra_tool_params)

            if missing_params:
                await _send_node_status(shared_for_exec, node_name, "tool_error", {"tool_name": tool_name, "message": f"缺少所需实体: {missing_params}，跳过。"})
                print(f"[WARNING] 缺少执行工具 {tool_name} 所需的实体: {', '.join(missing_params)}，跳过此步骤。")
                continue

            try:
                tool_function = None
                # Inspect the IP type to decide whether query_threat_intelligence should run at all
                if tool_name == "query_threat_intelligence" and "indicator" in tool_params and tool_params.get("indicator_type") == "ipv4":
                    ip_to_check = tool_params["indicator"]
                    if ip_to_check in shared_for_exec.get("private_ips", []):
                        await _send_node_status(shared_for_exec, node_name, "tool_skipped", {
                            "tool_name": tool_name,
                            "message": f"检测到内网IP ({ip_to_check})，跳过外部威胁情报查询。",
                            "input_params": tool_params
                        })
                        print(f"  - [跳过] 检测到内网IP ({ip_to_check})，跳过外部威胁情报查询。")
                        gathered_intelligence[step_name] = {"status": "skipped", "reason": "内网IP，无需查询外部情报"}
                        continue  # skip this step and move on to the next
                    else:
                        tool_function = tool_map[tool_name]
                elif tool_name == "query_attack_history":
                    # Routed through the SIEM connector rather than the tool map.
                    tool_function = siem_connector.query_attack_history
                elif tool_name == "batch_detect_dga_domains":
                    domains_to_check = shared_for_exec.get("extracted_domains", [])
                    if domains_to_check:
                        print(f"  - 正在对 {len(domains_to_check)} 个域名进行批量DGA检测...")
                        result = await batch_detect_dga_domains(domains_to_check, status_queue=shared_for_exec.get("status_queue"))
                        gathered_intelligence[step_name] = result
                    else:
                        gathered_intelligence[step_name] = {"status": "skipped", "reason": "没有可检测的域名"}
                        print(f"  - [跳过] 没有可检测的域名，跳过批量DGA检测。")
                    await _send_node_status(shared_for_exec, node_name, "tool_completed", {
                        "tool_name": tool_name,
                        "step_name": step_name,
                        "description": step_description,
                        "input_params": {"domains": domains_to_check},
                        "result": gathered_intelligence[step_name]
                    })
                    continue
                elif tool_name == "batch_query_threat_intelligence":
                    # Read the step_name of the earlier DGA-detection step from extra_tool_params
                    dga_results_step_name = extra_tool_params.get("dga_results_step_name")
                    dga_detection_results = []
                    if dga_results_step_name:
                        dga_detection_results = gathered_intelligence.get(dga_results_step_name, [])

                    domains_to_query = []
                    for dga_result in dga_detection_results:
                        # Make sure dga_result is a dict carrying both 'is_dga' and 'domain'
                        if isinstance(dga_result, dict) and dga_result.get("is_dga") and dga_result.get("domain"):
                            domains_to_query.append(dga_result.get("domain"))

                    indicator_type = tool_params.get("indicator_type")

                    if domains_to_query and indicator_type:
                        print(f"  - 正在对 {len(domains_to_query)} 个 {indicator_type} (DGA可疑) 进行批量威胁情报查询...")
                        result = await batch_query_threat_intelligence(domains_to_query, indicator_type, status_queue=shared_for_exec.get("status_queue"))
                        gathered_intelligence[step_name] = result
                    else:
                        gathered_intelligence[step_name] = {"status": "skipped", "reason": "没有可查询的DGA可疑域名或指标类型"}
                        print(f"  - [跳过] 没有可查询的DGA可疑域名或指标类型，跳过批量威胁情报查询。")
                    await _send_node_status(shared_for_exec, node_name, "tool_completed", {
                        "tool_name": tool_name,
                        "step_name": step_name,
                        "description": step_description,
                        "input_params": {"indicators": domains_to_query, "indicator_type": indicator_type},
                        "result": gathered_intelligence[step_name]
                    })
                    continue
                else:
                    tool_function = tool_map[tool_name]

                # If tool_function is still None, the step was skipped above
                if tool_function is None:
                    continue

                # Grab the status_queue from shared_for_exec so the tool can stream status
                status_queue = shared_for_exec.get("status_queue")

                # Add status_queue to tool_params so the tool function receives it
                tool_params_with_queue = tool_params.copy()
                tool_params_with_queue['status_queue'] = status_queue

                result = await tool_function(**tool_params_with_queue)
                gathered_intelligence[step_name] = result
                await _send_node_status(shared_for_exec, node_name, "tool_completed", {
                    "tool_name": tool_name,
                    "step_name": step_name,
                    "description": step_description,  # include the step description
                    "input_params": tool_params,  # the parameters actually passed to the tool
                    "result": result  # the tool's full result
                })
            except Exception as e:
                # Record the failure against the step so the run can continue.
                await _send_node_status(shared_for_exec, node_name, "tool_error", {"tool_name": tool_name, "error": str(e)})
                print(f"[ERROR] 执行工具 {tool_name} 失败: {e}")
                gathered_intelligence[step_name] = {"error": str(e)}

        return gathered_intelligence

    async def post_async(self, shared, prep_res, exec_res):
        """Store the gathered intelligence in shared and continue the flow."""
        node_name = "IntelligenceGatheringNode"
        shared["gathered_intelligence"] = exec_res
        await _send_node_status(shared, node_name, "node_end", {"message": "情报收集完成", "gathered_intelligence": exec_res, "shared_keys": list(shared.keys())})
        print(f"[Node:{node_name}] post - 情报收集完成。更新shared: gathered_intelligence。Shared keys: {list(shared.keys())}")
        print("[Node:IntelligenceGatheringNode] post - 返回Action: default")
        return "default"

class ExpertJudgmentNode(AsyncNode):
    """
    Node 3: expert judgment.

    Prompts the LLM (with streamed thinking) to classify the alert as
    True Positive / False Positive / Uncertain based on the alert,
    extracted entities and gathered intelligence, then routes the flow
    according to the judgment.
    """
    async def prep_async(self, shared):
        """Bundle everything the judgment prompt needs; return (context, shared)."""
        node_name = "ExpertJudgmentNode"
        await _send_node_status(shared, node_name, "node_start", {"message": "开始专家判断"})
        print(f"[Node:{node_name}] prep - 准备专家判断。Shared keys: {list(shared.keys())}\n")
        context = {
            "raw_alert": shared["raw_alert"],
            "entities": shared["entities"],
            "initial_hypothesis": shared["working_theory"].get("initial_playbook", {}).get("description", "无初步假设"),
            "gathered_intelligence": shared["gathered_intelligence"]
        }
        return context, shared  # return context and shared

    async def exec_async(self, prep_res_tuple):
        """Stream the LLM judgment and parse its YAML answer.

        Returns a dict with at least "judgment", "confidence" and
        "key_factors"; falls back to an "Uncertain" result when the LLM
        output cannot be parsed as a YAML mapping.
        """
        node_name = "ExpertJudgmentNode"
        context, shared_for_exec = prep_res_tuple  # unpack
        await _send_node_status(shared_for_exec, node_name, "llm_call", {"message": "正在调用思考进行专家判断..."})
        print("[Node:ExpertJudgmentNode] exec - 正在调用思考进行专家判断...")
        prompt = f"""
        你是一名拥有10年经验的资深SOC安全专家，正在使用内置思考模型进行分析。请用中文进行详细的推理分析，展示你的思考过程。你的任务是基于以下信息，判断这个告警是真实威胁（True Positive）还是误报（False Positive）。如果信息不足以做出明确判断，请标记为不确定（Uncertain）。

        ### 原始告警信息 (JSON格式)
        ```json
        {context['raw_alert']}
        ```

        ### 提取的关键实体
        ```json
        {json.dumps(context['entities'], indent=2, ensure_ascii=False)}
        ```

        ### 我们的初步假设
        这可能是一个 "{context['initial_hypothesis']}"。

        ### 我们收集到的外部情报
        ```json
        {json.dumps(context['gathered_intelligence'], indent=2, ensure_ascii=False)}
        ```

        ### 你的任务
        1.  **批判性分析**：
            - 对比原始告警（特别是其描述、标题、分类）和收集到的情报。
            - 情报是否支持或推翻了初步假设？例如，一个标记为“网站扫描”的告警，如果其源IP在威胁情报中是一个已知的恶意扫描器，那么它很可能是真实威胁。反之，如果源IP是公司内部的扫描设备，那它可能就是误报。
            - 考虑告警级别（level）、威胁方向（threat_direction）等原始信息。
        2.  **做出判断**：根据你的分析，从以下三个选项中选择一个作为你的结论：`True Positive`, `False Positive`, `Uncertain`。
        3.  **提供理由**：用简洁明了的语言解释你做出该判断的核心理由。

        **重要要求**：请用中文进行详细分析和推理，展示你的思考过程。请以YAML格式返回分析结果：
        ```yaml
        reasoning: "详细的中文推理过程，包括各个分析步骤"
        evidence_analysis:
          supporting_threat: ["支持真实威胁的证据"]
          supporting_benign: ["支持误报的证据"]
        judgment: "True Positive/False Positive/Uncertain"
        confidence: "高/中/低"
        key_factors: "关键决策因素"
        ```
        """
        # Call the streaming function so thinking tokens reach the frontend,
        # passing the node name as the step_name for routing.
        yaml_part = await stream_llm_with_thinking(
            prompt, 
            shared_for_exec.get("status_queue"), 
            node_name,
            temperature=0.1
        )
        
        # Parse the complete YAML string returned by the streaming call.
        try:
            clean_yaml_str = yaml_part.strip()
            # LLM output usually wraps the YAML in a Markdown code fence.
            # Strip a ```yaml fence, or — new here — a bare ``` fence, which
            # the previous code did not recognize and which made safe_load
            # fail and degrade the result to "Uncertain". (The old condition
            # `"```yaml" in x and "```" in x` was also redundant: the second
            # test is implied by the first.)
            if "```yaml" in clean_yaml_str:
                clean_yaml_str = clean_yaml_str.split("```yaml", 1)[1].split("```", 1)[0].strip()
            elif clean_yaml_str.startswith("```"):
                clean_yaml_str = clean_yaml_str[3:].split("```", 1)[0].strip()

            judgment_result = yaml.safe_load(clean_yaml_str)
            if not isinstance(judgment_result, dict):
                raise yaml.YAMLError("解析结果不是一个字典")

        except (yaml.YAMLError, IndexError) as e:
            print(f"[ERROR] 解析专家判断的YAML结果失败: {e}")
            print(f"[DEBUG] 原始LLM输出: {yaml_part}")
            # Build a default result carrying the error so the flow continues.
            judgment_result = {
                "reasoning": f"LLM返回的YAML格式错误，无法解析。原始输出: {yaml_part}",
                "judgment": "Uncertain",
                "confidence": "低",
                "key_factors": "LLM输出格式错误"
            }

        return judgment_result

    async def post_async(self, shared, prep_res, exec_res):
        """Store the structured judgment and route on its verdict."""
        node_name = "ExpertJudgmentNode"
        # Keep a copy without the free-text "reasoning" field — only the
        # structured judgment is stored in shared.
        structured_judgment = {k: v for k, v in exec_res.items() if k != "reasoning"}
        shared["expert_judgment"] = structured_judgment
        judgment = exec_res.get("judgment", "Uncertain")
        
        # Complete structured judgment payload for the frontend.
        full_judgment_data = {
            "message": f"专家判断完成: {judgment}",
            **structured_judgment  # splat every structured field into the payload
        }
        
        await _send_node_status(shared, node_name, "node_end", full_judgment_data)
        print(f"[Node:{node_name}] post - 判断结果: {judgment}。更新shared: expert_judgment。Shared keys: {list(shared.keys())}")
        
        if judgment == "True Positive":
            print("[Node:ExpertJudgmentNode] post - 返回Action: action_true_positive")
            return "action_true_positive"
        elif judgment == "False Positive":
            print("[Node:ExpertJudgmentNode] post - 返回Action: action_false_positive")
            return "action_false_positive"
        else:  # Uncertain, or any unrecognized judgment
            print("[Node:ExpertJudgmentNode] post - 返回Action: action_uncertain")
            return "action_uncertain"

class RefineInvestigationNode(AsyncNode):
    """
    Refinement node: runs when the expert judgment is Uncertain.

    Asks the LLM for a concrete next-steps directive and stores it in
    shared["investigation_directive"] before looping back to gathering.
    """
    async def prep_async(self, shared):
        """Assemble the refinement context and return (context, shared)."""
        node_name = "RefineInvestigationNode"
        await _send_node_status(shared, node_name, "node_start", {"message": "开始精炼调查方向"})
        print(f"[Node:{node_name}] prep - 准备精炼调查。Shared keys: {list(shared.keys())}\n")
        theory = shared["working_theory"]
        context = {
            "raw_alert": shared["raw_alert"],
            "entities": shared["entities"],
            "initial_hypothesis": theory.get("initial_playbook", {}).get("description", "无初步假设"),
            "gathered_intelligence": shared["gathered_intelligence"],
            "expert_judgment": shared["expert_judgment"],
        }
        return context, shared

    async def exec_async(self, prep_res_tuple):
        """Stream an LLM call and return the refined investigation directive."""
        node_name = "RefineInvestigationNode"
        context, shared_for_exec = prep_res_tuple
        await _send_node_status(shared_for_exec, node_name, "llm_call", {"message": "正在调用LLM精炼调查方向..."})
        print("[Node:RefineInvestigationNode] exec - 正在调用LLM精炼调查方向...")
        prompt = f"""
        你是一名资深安全调查员。当前的告警分析结果为“不确定”，需要进一步调查。

        ### 原始告警信息
        ```json
        {context['raw_alert']}
        ```

        ### 提取的关键实体
        ```json
        {json.dumps(context['entities'], indent=2, ensure_ascii=False)}
        ```

        ### 已收集到的情报
        ```json
        {json.dumps(context['gathered_intelligence'], indent=2, ensure_ascii=False)}
        ```

        ### 专家判断 (不确定原因)
        ```yaml
        {yaml.dump(context['expert_judgment'], allow_unicode=True)}
        ```

        ### 你的任务
        1.  **分析不确定性**: 根据“专家判断”中的理由，分析导致不确定的主要原因。
        2.  **建议下一步行动**: 提出具体的、可执行的建议，说明需要收集哪些额外信息，或者需要调整哪些调查方向，以便能够做出明确的判断。
        3.  **输出格式**: 以简洁的自然语言描述建议，例如：“需要查询源IP的历史活动记录，并分析User-Agent的异常行为。”

        请直接输出建议内容，不要包含其他无关信息。
        """
        investigation_directive = await stream_llm_with_thinking(
            prompt,
            shared_for_exec.get("status_queue"),
            node_name,  # pass the node name so the frontend routes the stream
            temperature=0.5
        )

        # Pull the payload out of a ```directive fenced block when present;
        # degrade gracefully to the raw output otherwise.
        _, opener, remainder = investigation_directive.partition("```directive")
        if not opener:
            print(f"[WARNING] LLM输出中未找到 ```directive 开始块。使用全部输出作为指令。原始输出: {investigation_directive}")
            return investigation_directive.strip()

        payload, closer, _ = remainder.partition("```")
        if not closer:
            # No closing fence: keep everything after the opening tag.
            print(f"[WARNING] LLM输出中未找到 ```directive 结束块。使用从开始标签到末尾的内容。原始输出: {investigation_directive}")
        return payload.strip()

    async def post_async(self, shared, prep_res, exec_res):
        """Record the directive and loop back to intelligence gathering."""
        node_name = "RefineInvestigationNode"
        shared["investigation_directive"] = exec_res
        await _send_node_status(shared, node_name, "node_end", {"message": "调查精炼指令已生成", "directive": exec_res})
        print(f"[Node:{node_name}] post - 调查精炼指令: {exec_res}。更新shared: investigation_directive。Shared keys: {list(shared.keys())}")
        print("[Node:RefineInvestigationNode] post - 返回Action: default (循环回情报收集)")
        return "default"

class GenerateReportNode(AsyncNode):
    """
    Node 4: report generation.

    Generates the final security-incident report matching the expert
    judgment via a streaming LLM call, extracts the ```report fenced
    block from the output, and stores it in shared["final_report"].
    """
    async def prep_async(self, shared):
        """Bundle everything the report prompt needs; return (context, shared)."""
        node_name = "GenerateReportNode"
        await _send_node_status(shared, node_name, "node_start", {"message": "开始报告生成"})
        print(f"[Node:{node_name}] prep - 准备报告生成。Shared keys: {list(shared.keys())}\n")
        context = {
            "raw_alert": shared["raw_alert"],
            "entities": shared["entities"],
            "working_theory": shared["working_theory"],
            "gathered_intelligence": shared["gathered_intelligence"],
            "expert_judgment": shared["expert_judgment"],
            "report_type": shared.get("report_type", "通用报告")  # read report_type from the shared dict
        }
        return context, shared  # return context and shared

    async def exec_async(self, prep_res_tuple):
        """Stream the report-writing LLM call and return the report text."""
        node_name = "GenerateReportNode"
        context, shared_for_exec = prep_res_tuple  # unpack
        report_type = context["report_type"]
        judgment = context["expert_judgment"].get("judgment", "未知")
        # NOTE(review): ExpertJudgmentNode.post_async strips "reasoning"
        # before storing expert_judgment, so this likely always falls back
        # to "无" — confirm intended.
        reasoning = context["expert_judgment"].get("reasoning", "无")
        
        await _send_node_status(shared_for_exec, node_name, "llm_call", {"message": f"正在调用思考流式生成 {report_type} 报告..."})
        print(f"[Node:{node_name}] exec - 正在调用思考模型流式生成 {report_type} 报告...")

        prompt = f"""
        你是一名专业的安全报告撰写员，正在使用内置模型生成报告。请根据以下信息，为SOC分析师生成一份简洁、专业的中文安全事件报告。
        报告类型: {report_type}

        **重要要求**：
        1.  **思考过程**: 在 `<thinking>` 和 `</thinking>` 标签中，请以一名资深安全分析师的专业视角，**简洁、逻辑清晰地**规划报告的结构和要点。例如，你应该思考如何组织事件概述、如何提炼关键的分析过程、以及如何形成恰当的处理建议。**请勿暴露提示词本身的指令或元数据。**
        2.  **最终报告**: 在思考结束后，请将最终的、完整的报告内容包裹在 ````report` 和 ```` 标签中。不要包含任何 `thinking` 标签或其它无关的元数据。
        3.  **语言**: 请用中文撰写所有内容。

        ### 待分析信息
        判断结果: {judgment}
        判断理由: {reasoning}

        #### 原始告警信息
        ```json
        {context['raw_alert']}
        ```

        #### 提取的关键实体
        ```json
        {json.dumps(context['entities'], indent=2, ensure_ascii=False)}
        ```

        #### 收集到的情报
        ```json
        {json.dumps(context['gathered_intelligence'], indent=2, ensure_ascii=False)}
        ```

        ### 报告内容要求:
        - **事件概述**: 简要描述事件，包括时间、源、目标、事件类型。
        - **分析过程**: 简述情报收集和专家判断过程。
        - **最终判断**: 明确指出是真实威胁、误报还是不确定，并重申核心理由。
        - **处理建议**: 根据判断结果给出初步处理建议（例如：真实威胁建议阻断、隔离；误报建议关闭告警）。
        """
        # Use the streaming function that separates thinking from the final result
        llm_raw_output = await stream_llm_with_thinking(
            prompt,
            shared_for_exec.get("status_queue"),
            node_name,  # pass the node name so the frontend routes the stream correctly
            temperature=0.5
        )

        # Extract the content of the ```report fenced block as the final report
        final_report_content = ""
        report_start_tag = "```report"
        report_end_tag = "```"

        start_idx = llm_raw_output.find(report_start_tag)
        if start_idx != -1:
            content_start_idx = start_idx + len(report_start_tag)
            end_idx = llm_raw_output.find(report_end_tag, content_start_idx)
            if end_idx != -1:
                final_report_content = llm_raw_output[content_start_idx:end_idx].strip()
            else:
                # No closing tag found: take everything after the opening tag
                final_report_content = llm_raw_output[content_start_idx:].strip()
                print(f"[WARNING] LLM输出中未找到 ```report 结束块。使用从开始标签到末尾的内容。原始输出: {llm_raw_output}")
        else:
            print(f"[WARNING] LLM输出中未找到 ```report 开始块。使用全部输出作为报告。原始输出: {llm_raw_output}")
            final_report_content = llm_raw_output.strip()  # fallback: use the whole output

        return final_report_content.strip()

    async def post_async(self, shared, prep_res, exec_res):
        """Store the report in shared and announce completion."""
        node_name = "GenerateReportNode"
        shared["final_report"] = exec_res
        # Announce completion including the full report content
        await _send_node_status(shared, node_name, "final_report_complete", {"report_content": exec_res})
        print(f"[Node:{node_name}] post - 报告生成完成。更新shared: final_report。Shared keys: {list(shared.keys())}")
        print("[Node:GenerateReportNode] post - 返回Action: default")
        return "default"

class UnsupportedEventNode(AsyncNode):
    """Terminal node for alerts whose event type has no matching playbook."""

    async def prep_async(self, shared):
        """Pass the shared store through so exec_async can emit status updates."""
        return shared

    async def exec_async(self, shared_for_exec):
        """Announce that the event cannot be handled automatically."""
        node_name = "UnsupportedEventNode"
        await _send_node_status(shared_for_exec, node_name, "node_start", {"message": "开始处理不支持的事件"})
        print("[Node:UnsupportedEventNode] exec - 无法识别或处理此事件类型")
        return "default"

    async def post_async(self, shared, prep_res, exec_res):
        """Record a manual-analysis placeholder report and finish the flow."""
        node_name = "UnsupportedEventNode"
        report = "无法识别或处理此事件类型，请手动分析。"
        shared["final_report"] = report
        await _send_node_status(shared, node_name, "node_end", {"message": "不支持事件处理完成", "final_report_summary": report})
        print(f"[Node:{node_name}] post - 更新shared: final_report。Shared keys: {list(shared.keys())}")
        print("[Node:UnsupportedEventNode] post - 返回Action: default")
        return "default"