from typing import Dict, Any, List
import json
from langchain_core.messages import HumanMessage, AIMessage
from langgraph.graph import StateGraph, END, START
from langgraph.prebuilt import ToolNode
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.tools import tool
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain_community.chat_models import ChatOllama
from langchain_community.tools.tavily_search import TavilySearchResults
from typing import TypedDict, List

# Define node functions
def plan_research(state: Dict[str, Any]) -> Dict[str, Any]:
    """Ask the planner LLM for a research plan and extract up to 5 subtopics.

    Args:
        state: Graph state; reads "topic" and "config_params"
            ("config_params" must contain a "planner_model" name for ChatOllama).

    Returns:
        Partial state update: the raw "plan" text, the parsed "subtopics"
        (capped at 5), a fresh empty "research_results" dict, and the
        "current_subtopic_index" cursor reset to 0.
    """
    topic = state["topic"]
    config = state["config_params"]

    # Initialize the planner model
    planner = ChatOllama(model=config["planner_model"])

    # Planning prompt (asks for 5 numbered subtopics in Chinese)
    planning_prompt = ChatPromptTemplate.from_messages([
        ("system", """你是一名专业的研究规划人员。根据用户提供的主题，制定详细的研究计划。
        列出需要探索的关键问题和子主题。每个子主题应该是对主要主题的重要方面的探索。"""),
        ("human", "我需要研究以下主题: {topic}\n\n请提供5个关键子主题或问题，我应该研究以全面了解这个主题。")
    ])

    # Execute planning
    chain = planning_prompt | planner | StrOutputParser()
    result = chain.invoke({"topic": topic})

    subtopics = _extract_numbered_items(result)

    return {
        "subtopics": subtopics[:5],  # cap at the 5 subtopics the prompt asked for
        "plan": result,
        "research_results": {},
        "current_subtopic_index": 0
    }


def _extract_numbered_items(text: str) -> List[str]:
    """Return the payloads of numbered list lines ("1. item") in *text*.

    BUG FIX: the previous heuristic accepted any line containing a digit
    anywhere, so prose lines that merely mentioned a number (e.g. a year)
    leaked into the subtopic list. Only lines that *start* with a digit
    are now treated as list items.
    """
    items = []
    for raw_line in text.split("\n"):
        line = raw_line.strip()
        if line and line[0].isdigit():
            # Drop the "N. " prefix, keep the payload.
            items.append(line.split(". ", 1)[-1])
    return items

def research_subtopic(state: Dict[str, Any]) -> Dict[str, Any]:
    """Research the current subtopic with a Tavily web search.

    Args:
        state: Graph state; reads "subtopics", "current_subtopic_index",
            "topic" and "research_results".

    Returns:
        ``{"should_end": True}`` when all subtopics are done; otherwise a
        partial state update with the accumulated "research_results", the
        "current_subtopic" just searched, and the advanced index cursor.
    """
    subtopics = state.get("subtopics", [])
    current_index = state.get("current_subtopic_index", 0)

    # Nothing left to research: signal the router to wrap up.
    if current_index >= len(subtopics):
        return {"should_end": True}

    current_subtopic = subtopics[current_index]

    # Combine the main topic with the subtopic for a focused query.
    search_query = f"{state['topic']} {current_subtopic}"

    # Use Tavily search (network call).
    search_tool = TavilySearchResults(max_results=3)
    search_results = search_tool.invoke({"query": search_query})

    # BUG FIX: copy before updating so the incoming graph state is never
    # mutated in place — LangGraph nodes should return updates, not edit
    # the state they were handed.
    research_results = dict(state.get("research_results", {}))
    research_results[current_subtopic] = search_results

    return {
        "research_results": research_results,
        "current_subtopic": current_subtopic,
        "current_subtopic_index": current_index + 1
    }

def compile_report(state: Dict[str, Any]) -> Dict[str, Any]:
    """Assemble the gathered search results into a report via the writer LLM.

    Reads the topic, subtopics, accumulated research results and the
    configured report structure from the state, then prompts the writer
    model once with all research data flattened into a single text blob.
    """
    topic = state["topic"]
    subtopics = state.get("subtopics", [])
    research_results = state.get("research_results", {})
    config = state["config_params"]
    report_structure = config["report_structure"]

    # Writer model for the final report.
    writer = ChatOllama(model=config["writer_model"])

    # Report-generation prompt.
    report_prompt = ChatPromptTemplate.from_messages([
        ("system", f"""你是一名专业的研究报告撰写者。使用以下结构撰写一份全面的研究报告：
        
        {report_structure}
        
        基于提供的研究数据为每个子主题创建内容丰富的部分。确保引用来源并提供深入的分析。"""),
        ("human", """请为主题"{topic}"创建一份深入的研究报告。

        以下是主要子主题和相关研究数据：
        
        {research_data}
        
        请遵循提供的报告结构，创建一份连贯、信息丰富的报告。""")
    ])

    # Flatten per-subtopic search hits into one prompt-friendly string.
    sections = []
    for idx, subtopic in enumerate(subtopics):
        if subtopic not in research_results:
            continue
        sections.append(f"\n\n## 子主题 {idx+1}: {subtopic}\n")
        for src_idx, hit in enumerate(research_results[subtopic]):
            sections.append(f"\n来源 {src_idx+1}: {hit.get('title', 'No title')}\n")
            sections.append(f"内容: {hit.get('content', 'No content')}\n")
            sections.append(f"链接: {hit.get('url', 'No URL')}\n")
    research_data = "".join(sections)

    # Generate the report and flag the workflow as done.
    report = (report_prompt | writer | StrOutputParser()).invoke(
        {"topic": topic, "research_data": research_data}
    )

    return {"report": report, "should_end": True}

def router(state: Dict[str, Any]) -> Dict[str, Any]:
    """Pick the next node, returned under the "next" key.

    Order of decisions: an explicit end flag wins; without a plan we go
    plan; while subtopics remain we research; otherwise we compile.
    """
    # Explicit termination requested by a worker node.
    if state.get("should_end", False):
        return {"next": "finish"}

    # No plan yet — start by planning the research.
    if "subtopics" not in state:
        return {"next": "plan_research_node"}

    remaining = len(state.get("subtopics", [])) - state.get("current_subtopic_index", 0)
    return {"next": "research" if remaining > 0 else "compile"}

def finish(state: Dict[str, Any]) -> Dict[str, Any]:
    """Format the final user-facing output from the compiled report.

    Falls back to a "no report generated" placeholder when the state has
    no "report" key.
    """
    body = state.get("report", "没有生成报告")
    header = "## 研究报告: {}".format(state["topic"])
    return {"finish": header + "\n\n" + body}

class State(TypedDict):
    """Shared state flowing through the research graph.

    NOTE(review): the workflow is invoked with only "topic" and
    "config_params" populated; the remaining keys are filled in by the
    nodes as the run progresses (TypedDict totality is not enforced at
    runtime by LangGraph).
    """
    # Research topic supplied by the caller.
    topic: str
    # Expects "planner_model", "writer_model" and "report_structure" keys.
    config_params: dict
    # Up to 5 subtopics produced by plan_research.
    subtopics: list
    # Raw planning text returned by the planner LLM.
    plan: str
    # Maps subtopic -> list of Tavily search results.
    research_results: dict
    # Cursor into `subtopics`; advanced by research_subtopic.
    current_subtopic_index: int
    # Subtopic handled by the most recent research step.
    current_subtopic: str
    # Final report text produced by compile_report.
    report: str
    # Set by nodes to steer the router to the "finish" node.
    should_end: bool
# Build the workflow graph.
builder = StateGraph(
    state_schema=State
)

# Register the nodes.
builder.add_node("router", router)
builder.add_node("plan_research_node", plan_research)
builder.add_node("research", research_subtopic)
builder.add_node("compile", compile_report)
builder.add_node("finish", finish)

# The router is the single entry point and decision hub.
builder.add_edge(START, "router")

# Dispatch on the "next" key that the router wrote into the state.
builder.add_conditional_edges(
    "router",
    lambda x: x["next"],
    {
        "plan_research_node": "plan_research_node",
        "research": "research",
        "compile": "compile",
        "finish": "finish"
    }
)

# Worker nodes loop back to the router so it can pick the next step.
builder.add_edge("plan_research_node", "router")
builder.add_edge("research", "router")
builder.add_edge("compile", "router")

# BUG FIX: "finish" previously had no outgoing edge (set_finish_point was
# commented out), leaving a dead-end node; route it to END so the run
# terminates cleanly. The redundant set_entry_point("router") call, which
# duplicated the explicit START -> "router" edge above, was dropped.
builder.add_edge("finish", END)

# Lightweight stdout logger describing each step of the workflow run.
class WorkflowLogger:
    """Prints node/workflow lifecycle events with their JSON payloads."""

    _RULE = "=" * 50

    @staticmethod
    def _fmt(payload):
        # Pretty-print payloads while keeping CJK characters readable.
        return json.dumps(payload, ensure_ascii=False, indent=2)

    def on_node_start(self, node_name, inputs):
        print(f"\n[开始执行] 节点: {node_name}")
        print("输入: " + self._fmt(inputs))

    def on_node_end(self, node_name, outputs):
        print(f"[完成执行] 节点: {node_name}")
        print("输出: " + self._fmt(outputs))

    def on_workflow_start(self, inputs):
        print("\n" + self._RULE)
        print("工作流开始执行")
        print("初始输入: " + self._fmt(inputs))
        print(self._RULE)

    def on_workflow_end(self, outputs):
        print("\n" + self._RULE)
        print("工作流执行完成")
        print("最终输出: " + self._fmt(outputs))
        print(self._RULE)

# Compile the graph into a runnable workflow instance.
workflow = builder.compile()

# Usage example
def run_research_workflow(topic, config_params):
    """Run the research workflow for *topic*, logging progress to stdout.

    Args:
        topic: The subject to research.
        config_params: Dict consumed by the graph nodes; must provide
            "planner_model", "writer_model" and "report_structure".

    Returns:
        The final graph state (includes the generated "report" and
        "finish" output).
    """
    logger = WorkflowLogger()

    # Initial state: the nodes fill in everything else.
    initial_state = {
        "topic": topic,
        "config_params": config_params
    }

    # BUG FIX: plain dicts were previously passed via `callbacks=`, but
    # LangChain callbacks must be BaseCallbackHandler instances, so that
    # call was invalid. Log explicitly with WorkflowLogger instead.
    logger.on_workflow_start(initial_state)
    result = workflow.invoke(initial_state)
    logger.on_workflow_end(result)

    return result

# Public API of this module: the compiled workflow and its runner.
__all__ = ["workflow", "run_research_workflow"]
