from typing import TypedDict, List, Dict, Any
import json
import asyncio
import os
import re
from pathlib import Path
from langgraph.graph import StateGraph, END
from langchain_core.messages import SystemMessage, HumanMessage
from langchain_openai import ChatOpenAI
from langchain_core.output_parsers import JsonOutputParser
from config.config import OPENAI_API_KEY, OPENAI_BASE_URL, DEFAULT_MODEL
from workflow.tools.novel_tools import split_novel_file, save_chapter_report, create_master_guide

# 1. State definition
# State shared by every workflow node: source/output paths, chapter
# bookkeeping, and the per-chapter summaries accumulated so far.
NovelState = TypedDict(
    "NovelState",
    {
        "novel_path": str,           # path to the source novel file
        "output_dir": str,           # directory the reports are written to
        "chapter_files": List[str],  # split chapter files, in reading order
        "current_index": int,        # index of the chapter being analyzed
        "summaries": List[str],      # accumulated "第N章: ..." summary lines
    },
)

# 2. LLM initialization
# Primary analysis LLM, constrained to JSON output
llm = ChatOpenAI(
    model=DEFAULT_MODEL, 
    temperature=0.1,  # near-deterministic, for reproducible analyses
    api_key=OPENAI_API_KEY,
    base_url=OPENAI_BASE_URL,
    # Force OpenAI JSON mode so every response is parseable by JsonOutputParser.
    model_kwargs={"response_format": {"type": "json_object"}},
    request_timeout=60
)

# 3. Node functions

def split_node(state: NovelState):
    """Split the source novel into per-chapter files.

    Resume-friendly: if a previous run already produced the chapter files,
    they are reused and the split tool is not invoked again.
    """
    print(f"📚 正在切分小说: {state['novel_path']}...")

    src = Path(state['novel_path'])
    archive_dir = src.parent / f"{src.stem}_拆解档案"
    raw_dir = archive_dir / "原始章节"

    # Reuse an earlier split when chapter files are already on disk.
    if raw_dir.exists():
        cached = sorted(str(p) for p in raw_dir.glob("*.txt"))
        if cached:
            print("✅ 检测到已切分的章节，跳过切分步骤")
            return {
                "output_dir": str(archive_dir),
                "chapter_files": cached,
                "current_index": 0,
                "summaries": [],
            }

    # Fresh split via the tool function.
    result = split_novel_file.invoke({"file_path": state['novel_path']})
    if "error" in result:
        raise ValueError(result["error"])

    return {
        "output_dir": result["output_dir"],
        "chapter_files": result["chapter_files"],
        "current_index": 0,
        "summaries": [],
    }

def _load_cached_summary(report_file: Path) -> str:
    """Best-effort extraction of the plot summary from an existing report.

    Returns the text under the '## 剧情梗概' heading, or the placeholder
    '已存在' when the file cannot be read or the heading is missing.
    """
    try:
        content = report_file.read_text(encoding="utf-8")
    except (OSError, UnicodeDecodeError):  # narrowed from a bare `except:`
        return "已存在"
    # Report format is assumed to be '## 剧情梗概\n<text>' followed by the
    # next heading or end-of-file — TODO confirm against save_chapter_report.
    match = re.search(r'## 剧情梗概\n(.+?)(?:\n#|$)', content, re.DOTALL)
    return match.group(1).strip() if match else "已存在"


def analyze_node(state: NovelState):
    """Summarize the chapter at ``current_index`` and persist its report.

    Resume-friendly: when the report file already exists, the stored summary
    is reused instead of calling the LLM.  Always advances ``current_index``
    (unless it is already out of range), so a failing chapter is skipped
    rather than stalling the workflow.
    """
    idx = state["current_index"]
    files = state["chapter_files"]

    # Out-of-range guard; the router normally diverts to compile first.
    if idx >= len(files):
        return {"current_index": idx}

    file_path = files[idx]
    chapter_name = Path(file_path).stem

    # Must mirror the naming scheme used by save_chapter_report so the
    # existence check below finds reports written by previous runs.
    report_dir = Path(state["output_dir"]) / "分章详解"
    safe_title = re.sub(r'[\\/*?:"<>|]', "", chapter_name)[:50]
    report_file = report_dir / f"{idx+1:03d}_{safe_title}_报告.md"

    if report_file.exists():
        print(f"✅ 第 {idx+1}/{len(files)} 章报告已存在，跳过分析")
        summary = _load_cached_summary(report_file)
        new_summaries = state.get("summaries", []) + [f"第{idx+1}章: {summary}"]
        return {
            "summaries": new_summaries,
            "current_index": idx + 1
        }

    print(f"🔍 正在分析第 {idx+1}/{len(files)} 章: {chapter_name}...")

    # Read the chapter text; skip the chapter on failure.
    try:
        with open(file_path, "r", encoding="utf-8") as f:
            content = f.read()
    except (OSError, UnicodeDecodeError) as e:  # narrowed from `except Exception`
        print(f"❌ 读取文件失败: {e}")
        return {"current_index": idx + 1}

    # Bound the prompt size.  Fix: the truncation marker is only inserted when
    # text is actually dropped — the old code always emitted "(截断)", even for
    # short chapters, and silently cut the middle of 4000-5000 char chapters.
    if len(content) > 5000:
        excerpt = f"{content[:4000]} ... (截断) ... {content[-1000:]}"
    else:
        excerpt = content

    prompt = f"""
    你是一位专业的小说编辑。请阅读以下章节内容，输出一段简明的剧情梗概（100字左右）。
    
    【本章内容】：
    {excerpt}
    
    请直接输出 JSON: {{ "summary": "..." }}
    """

    # Call the LLM with retries; give up (and skip the chapter) after 3 failures.
    max_retries = 3
    summary = ""
    for attempt in range(max_retries):
        try:
            response = llm.invoke([
                SystemMessage(content="你是一个输出 JSON 的助手。"), 
                HumanMessage(content=prompt)
            ])
            analysis = JsonOutputParser().parse(response.content)
            summary = analysis.get("summary", "")
            break
        except Exception as e:
            if attempt < max_retries - 1:
                print(f"⚠️ 分析失败 (尝试 {attempt+1}/{max_retries}): {e}，正在重试...")
            else:
                print(f"❌ 分析彻底失败，跳过本章: {e}")
                return {"current_index": idx + 1}

    # Persist the per-chapter report (best-effort: a save failure is logged
    # but does not lose the in-memory summary).
    save_res = save_chapter_report.invoke({
        "output_dir": state["output_dir"],
        "chapter_index": idx + 1,
        "chapter_title": chapter_name,
        "summary": summary
    })
    if "error" in save_res:
        print(f"❌ 保存报告失败: {save_res['error']}")

    # Append this chapter's summary to the running list.
    new_summaries = state.get("summaries", []) + [f"第{idx+1}章: {summary}"]
    return {
        "summaries": new_summaries,
        "current_index": idx + 1
    }

def compile_node(state: NovelState):
    """Aggregate the per-chapter summaries into the book-level reports.

    Runs one raw-text style analysis on the opening chapters, then three
    summary-based analyses (characters, world, structure) in parallel, and
    hands the combined result to ``create_master_guide``.  Skips everything
    when all four final report files already exist (resume support).
    """
    print("📑 分析完成，正在生成最终报告...")

    output_dir = Path(state["output_dir"])

    # Resume support: skip when every final report is already on disk.
    required_files = [
        output_dir / "人物档案.md",
        output_dir / "世界观设定.md",
        output_dir / "语言风格分析.md",
        output_dir / "结构分析.md"
    ]
    if all(f.exists() for f in required_files):
        print("✅ 检测到最终报告已存在，跳过汇总分析")
        return {}

    all_summaries = "\n".join(state["summaries"])
    chapter_files = state["chapter_files"]

    def _call_llm_json(system_msg: str, prompt: str, fail_label: str):
        """One JSON-mode LLM round trip; returns the parsed result or None on failure."""
        try:
            response = llm.invoke([
                SystemMessage(content=system_msg), 
                HumanMessage(content=prompt)
            ])
            return JsonOutputParser().parse(response.content)
        except Exception as e:
            print(f"{fail_label}: {e}")
            return None

    # 1. Sample the first three chapters for the language-style analysis.
    first_three_chapters = []
    for i in range(min(3, len(chapter_files))):
        try:
            with open(chapter_files[i], "r", encoding="utf-8") as f:
                first_three_chapters.append(f.read()[:2000])
        except (OSError, UnicodeDecodeError) as e:
            # Best-effort: an unreadable chapter must not abort the report,
            # but the failure is no longer silently swallowed (was bare `except`).
            print(f"⚠️ 读取章节失败: {e}")

    style_text = "\n\n---\n\n".join(first_three_chapters)

    # 2. Language-style analysis (needs raw text, so it runs before the fan-out).
    print("🎨 分析语言风格...")
    style_prompt = f"""
    基于以下前三章节选内容，分析这本小说的语言风格基调。
    
    【前三章节选】：
    {style_text}
    
    请输出 JSON，包含字段：
    style_analysis: "详细描述语言风格特点，包括：叙事节奏、用词特点、句式风格、修辞手法等"
    """
    parsed = _call_llm_json("你是一个文学风格分析专家。请输出 JSON。", style_prompt, "⚠️ 风格分析失败")
    style_analysis = parsed.get("style_analysis", "") if parsed is not None else "分析失败"

    # 3. Summary-based analyses, fanned out on a small thread pool (I/O-bound
    #    LLM calls, so threads overlap the waiting).
    print("📊 开始多维度并行分析...")

    from concurrent.futures import ThreadPoolExecutor

    def analyze_characters():
        """Build the character dossier from the whole-book summaries."""
        print("👥 分析人物档案...")
        prompt = f"""
        基于以下全书剧情梗概，整理出完整的人物档案。
        
        【全书剧情梗概】：
        {all_summaries}
        
        请输出 JSON: {{ "角色名": "角色生平、性格、关系、成长轨迹等完整描述" }}
        """
        result = _call_llm_json("你是一个人物分析专家。请输出 JSON。", prompt, "⚠️ 人物分析失败")
        return result if result is not None else {}

    def analyze_world():
        """Extract the world-building settings from the whole-book summaries."""
        print("🌍 分析世界观设定...")
        prompt = f"""
        基于以下全书剧情梗概，整理出完整的世界观设定。
        
        【全书剧情梗概】：
        {all_summaries}
        
        请输出 JSON: {{ "设定项名称": "详细描述（如：等级体系、势力分布、地理环境、魔法体系等）" }}
        """
        result = _call_llm_json("你是一个世界观设定专家。请输出 JSON。", prompt, "⚠️ 世界观分析失败")
        return result if result is not None else {}

    def analyze_structure():
        """Analyze narrative structure and pacing from the whole-book summaries."""
        print("📐 分析叙事结构...")
        prompt = f"""
        基于以下全书剧情梗概，进行深度结构分析。
        
        【全书剧情梗概】：
        {all_summaries}
        
        请输出 JSON，包含以下字段：
        {{
            "structure_breakdown": "包含时间线、主线/辅线、伏笔与响应的综合分析（按章节顺序梳理）",
            "pacing": "节奏把控分析（高潮、低谷、转折点的分布）"
        }}
        """
        result = _call_llm_json("你是一个叙事结构分析专家。请输出 JSON。", prompt, "⚠️ 结构分析失败")
        return result if result is not None else {}

    with ThreadPoolExecutor(max_workers=3) as executor:
        futures = [
            executor.submit(analyze_characters),
            executor.submit(analyze_world),
            executor.submit(analyze_structure)
        ]
        characters, world_setting, structure_analysis = [f.result() for f in futures]

    # 4. Bundle everything and write the master guide (best-effort).
    final_analysis = {
        "characters": characters,
        "world_setting": world_setting,
        "structure_analysis": structure_analysis,
        "style_analysis": style_analysis
    }
    try:
        create_master_guide.invoke({
            "output_dir": state["output_dir"],
            "final_analysis": final_analysis
        })
    except Exception as e:
        print(f"❌ 生成报告失败: {e}")

    return {}

def should_continue(state: NovelState):
    """Route back into per-chapter analysis until every chapter is processed."""
    remaining = len(state["chapter_files"]) - state["current_index"]
    return "analyze" if remaining > 0 else "compile"

# 4. Graph construction
def create_novel_workflow():
    """Build and compile the split → analyze-loop → compile LangGraph workflow."""
    graph = StateGraph(NovelState)

    # Nodes.
    graph.add_node("split", split_node)
    graph.add_node("analyze", analyze_node)
    graph.add_node("compile", compile_node)

    # Edges: split feeds the analysis loop, which re-enters itself until
    # should_continue routes to the final compile step.
    graph.set_entry_point("split")
    graph.add_edge("split", "analyze")
    graph.add_conditional_edges(
        "analyze",
        should_continue,
        {"analyze": "analyze", "compile": "compile"},
    )
    graph.add_edge("compile", END)

    return graph.compile()
