from pathlib import Path
from typing import List, Tuple, Dict, Any, TypedDict
from dataclasses import dataclass
from dataclasses import field
from langgraph.graph import StateGraph, END
import json
import re
import sys
import os
import string
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

from utils.file_utils import read_docx_file, read_text_file
from models.llm_models import GLOBAL_LLM
from config import cfg
from pydantic import BaseModel, Field, validator


PROJECT_ROOT = Path(__file__).parent.parent.resolve()


class CharacterProfile(BaseModel):
    """Profile of a single character in the script.

    The Chinese ``description=`` strings below are runtime data (they feed the
    LLM-facing JSON schema), so they are deliberately left untranslated.
    """
    description: str = Field(description="角色人设描述，包括性格、背景、特点等")
    importance: str = Field(description="角色重要性：主角/配角/龙套")


class EpisodeContent(BaseModel):
    """Script content of a single episode.

    Only ``episode_number`` and ``start`` come from the LLM's episode-marker
    pass; ``characters``/``content``/``summary`` are filled in later by
    content extraction, hence their defaults.
    """
    episode_number: int = Field(description="集数编号")
    start: str = Field(description="本集开始文本，如第几集、第几章、第几幕、第几场等")
    # default_factory=list instead of default=[]: never share one mutable list
    # object as a declared default (pydantic-recommended form).
    characters: List[str] = Field(default_factory=list, description="本集出现的角色列表")
    content: str = Field(default="", description="本集剧本内容")
    summary: str = Field(default="", description="本集剧本梗概")


class ScriptStructure(BaseModel):
    """Full analyzed structure of a script.

    Assembled in ``ScriptAnalysisAgent._analyze_structure`` by merging the
    outline/characters result with the episode-marker result, then enriched
    in place by ``_extract_episodes``.
    """
    outline: str = Field(description="剧本故事大纲")
    main_characters: List[str] = Field(description="主要角色列表")
    character_profiles: Dict[str, CharacterProfile] = Field(description="角色人设详情")
    episodes: List[EpisodeContent] = Field(description="分集结果")
    


@dataclass
class ScriptAnalysisAgentInput:
    """Input for the script analysis agent."""
    # Script to analyze — presumably the script file path, mirroring
    # run_analysis()'s argument; TODO confirm, this dataclass is not
    # referenced anywhere in this module.
    script: str


@dataclass
class ScriptAnalysisAgentOutput:
    """Final result of a script analysis run."""
    # Script name derived from the input file's stem (see _generate_result).
    script_name: str
    # Fully analyzed structure: outline, characters, per-episode content.
    script_structure: ScriptStructure


class ScriptAnalysisAgentState(TypedDict):
    """Mutable state threaded through the LangGraph workflow nodes."""
    # Path of the input script file, as given to run_analysis().
    script_input: str
    # Raw script text loaded by _load_script ("" on load failure).
    script_content: str
    # Parsed structure from _analyze_structure (set to None on failure).
    script_structure: ScriptStructure
    # Final output built by _generate_result (None on failure).
    result: ScriptAnalysisAgentOutput


@dataclass
class ScriptAnalysisAgentConfig:
    """Configuration for ScriptAnalysisAgent."""
    # Agent name; also used as the workspace sub-directory name.
    agent_name: str
    # Maximum LLM context size in tokens. NOTE(review): copied onto the agent
    # in __init__ but not read by any method visible here — confirm usage.
    max_llm_context: int = 32000


class ScriptAnalysisAgent:
    """LangGraph-based agent: load a script file, analyze its structure with
    an LLM, slice it into episodes and persist the result as JSON."""

    def __init__(self, config: ScriptAnalysisAgentConfig):
        """Compile the workflow graph and prepare the agent's workspace.

        Args:
            config: agent name (workspace sub-directory) and context settings.
        """
        self.config = config
        self.max_llm_context = config.max_llm_context
        self.app = self._build_graph()
        self.llm = GLOBAL_LLM

        # Per-agent scratch directory under <project>/agents_workspace/.
        # mkdir with parents=True, exist_ok=True is idempotent, so the
        # previous exists() pre-check was redundant and has been dropped.
        self.workspace_dir = PROJECT_ROOT / "agents_workspace" / self.config.agent_name
        self.workspace_dir.mkdir(parents=True, exist_ok=True)


    def _build_graph(self) -> StateGraph:
        """Wire the four analysis stages into a compiled linear pipeline."""
        workflow = StateGraph(ScriptAnalysisAgentState)

        # Pipeline stages, executed strictly in this order.
        stages = [
            ("load_script", self._load_script),
            ("analyze_structure", self._analyze_structure),
            ("extract_episodes", self._extract_episodes),
            ("generate_result", self._generate_result),
        ]
        for node_name, handler in stages:
            workflow.add_node(node_name, handler)

        # Linear chain: entry -> each stage in turn -> END.
        workflow.set_entry_point(stages[0][0])
        for (src, _), (dst, _) in zip(stages, stages[1:]):
            workflow.add_edge(src, dst)
        workflow.add_edge(stages[-1][0], END)

        return workflow.compile()
    
    async def _load_script(self, state: ScriptAnalysisAgentState) -> ScriptAnalysisAgentState:
        """Read the script file named by ``state["script_input"]`` into state.

        .docx files go through the docx reader, everything else through the
        plain-text reader.  On any failure ``script_content`` becomes "".
        """
        try:
            path = state["script_input"]
            # Pick the reader by file extension.
            reader = read_docx_file if path.endswith('.docx') else read_text_file
            text = reader(path)

            if not text:
                raise ValueError("无法读取剧本内容")
            state["script_content"] = text
            print(f"成功加载剧本，长度: {len(text)} 字符")
        except Exception as e:
            print(f"加载剧本时出错: {e}")
            state["script_content"] = ""
        return state
    
    async def _get_script_outline_and_characters(self, script_content: str) -> dict:
        """Step 1: ask the LLM for the story outline and character profiles.

        Sends the whole script in one prompt and expects a JSON object with
        ``outline``, ``main_characters`` and ``character_profiles`` keys (the
        Chinese prompt spells out the exact schema).

        Returns:
            Parsed dict, or None when the response held no valid JSON.
        """
        print("🤖 (1/2) 正在分析故事大纲和角色...")
        prompt = f"""
你是一个专业的剧本分析师。请仔细分析以下剧本内容，并提供故事大纲和角色简介。

分析要求：
1. 提取故事大纲，总结主要情节和主题。
2. 识别所有主要角色。
3. 为每个主要角色生成详细的人设描述。

剧本内容：
{script_content}... 

请严格按照以下JSON格式返回数据，不要包含任何其他文字或格式：
{{
    "outline": "故事大纲内容",
    "main_characters": ["角色1", "角色2", "角色3"],
    "character_profiles": {{
        "角色1": {{
            "description": "角色描述",
            "importance": "主角"
        }},
        "角色2": {{
            "description": "角色描述",
            "importance": "配角"
        }}
    }}
}}
"""
        messages = [{"role": "user", "content": prompt}]
        # Cap the answer size; outline + profiles should fit in 2048 tokens.
        response = await self.llm.a_run(messages, max_tokens=2048)
        
        return self._parse_llm_json_response(response, "故事大纲和角色分析")

    async def _get_episode_markers(self, script_content: str) -> dict:
        """Step 2: ask the LLM for the start marker of every episode.

        The model is instructed to return only ``episode_number`` and the
        literal ``start`` text of each episode marker; actual content slicing
        happens later in ``_extract_episode_contents``.

        Returns:
            Parsed dict (expected key: "episodes"), or None on parse failure.
        """
        print("🤖 (2/2) 正在分析分集标记...")
        prompt = f"""
你是一个专业的剧本分析师。你的任务是仔细阅读下面的剧本内容，并识别出每一集的起始分割点，不要遗漏集数。

分析要求：
1. **查找起始标记**：请寻找剧本中明确的集数标记，例如 "第一集"、"第二集"、"Episode 1"、"第 1 章" 等。
2. **记录起始文本**：在 `start` 字段中，只记录每一集开始的完整标记文本（例如，"第一集"）。
3. **严格按JSON格式返回**：只返回一个包含 "episodes" 列表的JSON对象，每个对象只包含 `episode_number` 和 `start`。

剧本内容：
{script_content}...

请严格按照以下JSON格式返回数据，不要包含任何其他文字或格式：
{{
    "episodes": [
        {{
            "episode_number": 1,
            "start": "第一集"
        }},
        {{
            "episode_number": 2,
            "start": "第二集"
        }}
    ]
}}
"""
        messages = [{"role": "user", "content": prompt}]
        response = await self.llm.a_run(messages)
        
        return self._parse_llm_json_response(response, "分集标记分析")

    async def _analyze_structure(self, state: ScriptAnalysisAgentState) -> ScriptAnalysisAgentState:
        """Run the two-step LLM analysis and store a ScriptStructure in state.

        On any failure ``state["script_structure"]`` is set to None so the
        downstream nodes can skip their work gracefully.
        """
        try:
            script_content = state["script_content"]
        
            # Step 1: outline and character profiles.
            outline_and_chars_data = await self._get_script_outline_and_characters(script_content)
            if not outline_and_chars_data:
                raise ValueError("第一步：分析故事大纲和角色失败")

            # Step 2: episode start markers.
            episode_data = await self._get_episode_markers(script_content)
            if not episode_data or "episodes" not in episode_data:
                raise ValueError("第二步：分析分集标记失败")

            # Merge the two partial results; their key sets do not overlap.
            full_structure_data = {**outline_and_chars_data, **episode_data}
            
            # May raise a pydantic ValidationError when the LLM output does
            # not match the schema; caught by the except clause below.
            script_structure = ScriptStructure(**full_structure_data)
            
            # NOTE(review): a freshly constructed pydantic model is always
            # truthy, so the else branch below appears unreachable — confirm.
            if script_structure:
                state["script_structure"] = script_structure
                print(f"✅ 成功分析剧本结构，识别到 {len(script_structure.episodes)} 集")
                print(f"📖 主要角色: {script_structure.character_profiles.keys()}")
                print(f"📝 故事大纲: {script_structure.outline[:100]}...")
            else:
                print("❌ 剧本结构分析失败")
                state["script_structure"] = None
                
        except Exception as e:
            print(f"❌ 分析剧本结构时出错: {e}")
            state["script_structure"] = None
        return state
    
    async def _extract_episodes(self, state: ScriptAnalysisAgentState) -> ScriptAnalysisAgentState:
        """Fill each episode in the analyzed structure with its actual text.

        No-op (with a log line) when the previous node produced no structure;
        the structure's ``episodes`` list is replaced in place on success.
        """
        try:
            full_text = state["script_content"]
            structure = state["script_structure"]

            # Guard: nothing to do without an analyzed structure.
            if not structure or not structure.episodes:
                print("❌ 没有可用的剧本结构数据，跳过内容提取")
                return state

            print(f"🔍 开始提取 {len(structure.episodes)} 集的实际内容...")

            # Slice the raw text and enrich every episode via the LLM.
            enriched = await self._extract_episode_contents(full_text, structure.episodes)

            if not enriched:
                print("❌ 内容提取失败")
            else:
                structure.episodes = enriched
                state["script_structure"] = structure
                print(f"✅ 成功提取 {len(enriched)} 集的内容")

        except Exception as e:
            print(f"❌ 提取分集内容时出错: {e}")
        return state
    
    async def _extract_episode_contents(self, script_content: str, episodes: List[EpisodeContent]) -> List[EpisodeContent]:
        """Slice the raw script into per-episode chunks and enrich each one.

        Each episode's start marker is located in the full text; the chunk
        runs until the next episode's marker (or the end of the document).
        Every non-empty chunk is then analyzed by the LLM for its character
        list and summary.

        Episodes whose marker is not found, whose chunk is empty, or that
        raise during processing are skipped — the returned list may therefore
        be shorter than the input.
        """
        enhanced_episodes = []
        
        for i, episode in enumerate(episodes):
            try:
                print(f"📝 提取第{episode.episode_number}集内容...")
                
                # Locate where this episode begins.
                start_pos = self._find_text_position(script_content, episode.start)
                if start_pos == -1:
                    print(f"⚠️ 未找到第{episode.episode_number}集的起始标记: {episode.start}")
                    continue
                
                # Locate where it ends.
                if i < len(episodes) - 1:
                    # Not the last episode: the next episode's marker ends this
                    # one; search strictly after start_pos so an identical
                    # marker cannot match the current episode again.
                    next_episode = episodes[i + 1]
                    end_pos = self._find_text_position(script_content, next_episode.start, start_pos + 1)
                    if end_pos == -1:
                        print(f"⚠️ 未找到下一集标记，使用文档结尾")
                        end_pos = len(script_content)
                else:
                    # Last episode: runs to the end of the document.
                    end_pos = len(script_content)
                
                # Slice out this episode's text.
                episode_content = script_content[start_pos:end_pos].strip()
                
                if episode_content:
                    # One LLM call per episode: characters + summary together.
                    characters, summary = await self._analyze_episode_content_with_llm(episode_content)
                    
                    # Build the enriched episode object.
                    enhanced_episode = EpisodeContent(
                        episode_number=episode.episode_number,
                        start=episode.start,
                        content=episode_content,
                        summary=summary,
                        characters=characters
                    )
                    enhanced_episodes.append(enhanced_episode)
                    
                    print(f"✅ 第{episode.episode_number}集提取完成: {len(episode_content)}字符")
                else:
                    print(f"⚠️ 第{episode.episode_number}集内容为空")
                    
            except Exception as e:
                print(f"❌ 提取第{episode.episode_number}集时出错: {e}")
                continue
        
        return enhanced_episodes
    
    async def _analyze_episode_content_with_llm(self, episode_content: str) -> tuple[List[str], str]:
        """Analyze one episode with a single LLM call: characters + summary.

        Returns:
            ``(characters, summary)``; falls back to ``([], "")`` on any LLM
            or parsing failure so callers never need to handle errors.
        """
        try:
            prompt = f"""
你是一个专业的剧本分析师。请分析以下单集剧本内容，完成两个任务：

任务1：提取主要角色
- 只提取在这一集中有明确台词内容的角色
- 角色必须有具体的对话或独白
- 排除旁白、背景描述等非对话内容

任务2：生成剧集梗概
- 总结本集的主要情节和关键事件
- 语言简洁明了

剧本内容：
{episode_content}...

请严格按照以下JSON格式返回结果：
{{
    "characters": ["角色1", "角色2"],
    "summary": "本集的简洁梗概"
}}

请只返回JSON数据，不要其他文字。
"""
            
            messages = [{"role": "user", "content": prompt}]
            response = await self.llm.a_run(messages)
            
            # Parse the JSON answer (None on failure -> fall through to ([], "")).
            result = self._parse_llm_json_response(response, "单集内容分析")
            if result:
                characters = result.get("characters", [])
                summary = result.get("summary", "")
                return characters, summary
                    
        except Exception as e:
            print(f"⚠️ 大模型分析失败: {e}")
            
        return [], ""
    
    def _find_text_position(self, content: str, search_text: str, start_from: int = 0) -> int:
        """Locate ``search_text`` inside ``content`` at or after ``start_from``.

        Three strategies, in decreasing strictness:
        1. exact substring match;
        2. fuzzy match with ASCII punctuation and whitespace stripped from
           both sides, mapped back to an index in the original text;
        3. first whitespace-separated keyword (longer than 2 chars).

        Returns:
            Index into ``content``, or -1 when nothing matches.
        """
        if not search_text:
            return -1
        
        # 1. Exact match.
        pos = content.find(search_text, start_from)
        if pos != -1:
            return pos
        
        # 2. Fuzzy match ignoring punctuation/whitespace.
        # BUGFIX: only the tail from start_from is cleaned. The previous code
        # searched the cleaned *whole* text from the original-coordinate
        # start_from; cleaning shortens the text, so that offset was wrong and
        # could skip valid matches.
        removal_table = str.maketrans('', '', string.punctuation + ' \n\t')
        tail = content[start_from:]
        clean_search = search_text.translate(removal_table)
        clean_tail = tail.translate(removal_table)
        
        pos = clean_tail.find(clean_search)
        if pos != -1:
            # Map the cleaned-tail hit back into the original tail, then shift
            # by start_from to obtain an absolute index into content.
            return start_from + self._map_clean_position_to_original(tail, clean_tail, pos)
        
        # 3. Keyword fallback: any sufficiently long token of the search text.
        keywords = search_text.split()
        for keyword in keywords:
            if len(keyword) > 2:  # only consider keywords longer than 2 chars
                pos = content.find(keyword, start_from)
                if pos != -1:
                    return pos
        
        return -1
    
    def _map_clean_position_to_original(self, original: str, clean: str, clean_pos: int) -> int:
        """Map an index in the cleaned text back to an index in ``original``.

        ``clean`` is ``original`` with punctuation and whitespace removed, and
        ``clean_pos`` indexes into it.  Counting kept characters in
        ``original`` is sufficient, so ``clean`` itself is not read (kept in
        the signature for caller compatibility).

        Returns:
            Index in ``original`` of the kept character at ``clean_pos``.
        """
        removable = ' \n\t' + string.punctuation
        kept = 0
        pos = 0
        
        # Advance until clean_pos kept (non-removable) characters have passed.
        while pos < len(original) and kept < clean_pos:
            if original[pos] not in removable:
                kept += 1
            pos += 1
        
        # BUGFIX: skip any removable run so the result lands exactly on the
        # kept character corresponding to clean_pos; the previous version
        # could return the index of preceding whitespace/punctuation (e.g.
        # clean_pos=0 with leading spaces returned 0).
        while pos < len(original) and original[pos] in removable:
            pos += 1
        
        return pos
    
    def _parse_llm_json_response(self, response, response_type: str = "未知") -> dict:
        """Extract and parse the JSON object embedded in an LLM response.

        Args:
            response: LLM response object; only ``response.content`` is read.
            response_type: human-readable label used in the log messages.

        Returns:
            The parsed dict, or None when no JSON could be recovered.
        """
        try:
            # Grab the outermost {...} span (greedy; DOTALL spans newlines).
            json_match = re.search(r'\{.*\}', response.content, re.DOTALL)
            if not json_match:
                print(f"❌ {response_type}：未找到JSON格式数据")
                return None
            
            json_str = json_match.group()
            
            # Collapse all whitespace runs to single spaces.
            # NOTE(review): this also rewrites whitespace *inside* JSON string
            # values (e.g. newlines in the outline become spaces) — presumably
            # intentional normalization; confirm before changing.
            cleaned_str = re.sub(r'\s+', ' ', json_str).strip()
            
            # Heuristic repair for truncated responses: append any missing
            # closing braces/brackets. NOTE(review): count-based, so the
            # nesting order ('}' before ']') may not match the actual cut.
            open_brackets = cleaned_str.count('{') - cleaned_str.count('}')
            open_squares = cleaned_str.count('[') - cleaned_str.count(']')
            cleaned_str += '}' * open_brackets
            cleaned_str += ']' * open_squares
            
            # Parse the (possibly repaired) JSON.
            result = json.loads(cleaned_str)
            print(f"✅ {response_type}：JSON解析成功")
            return result
            
        except json.JSONDecodeError as e:
            print(f"❌ {response_type}：JSON解析失败: {e}")
            # cleaned_str may not exist if the failure happened before cleanup.
            print(f"🔍 出错位置附近的内容: {cleaned_str[max(0, e.pos-50):e.pos+50] if 'cleaned_str' in locals() else 'N/A'}")
            return None
        except Exception as e:
            print(f"❌ {response_type}：解析过程中出错: {e}")
            return None
    
    async def _save_analysis_to_json(self, script_structure: ScriptStructure, script_name: str) -> None:
        """Persist the analysis as <workspace>/<name>/<name>_analysis.json.

        Errors are logged and swallowed: failing to save must not abort the
        workflow.  (Runs synchronous file I/O despite being async — acceptable
        for these small JSON files.)
        """
        try:
            # Per-script output directory; self.workspace_dir already exists
            # (created in __init__), so no parents=True needed here.
            # (Dropped the pointless f"{script_name}" interpolation.)
            output_dir = self.workspace_dir / script_name
            output_dir.mkdir(exist_ok=True)
            
            # Flatten the pydantic models into plain dicts for serialization.
            analysis_data = {
                "script_name": script_name,
                "outline": script_structure.outline,
                "main_characters": script_structure.main_characters,
                "character_profiles": {
                    name: profile.dict()
                    for name, profile in script_structure.character_profiles.items()
                },
                "episodes": [
                    {
                        "episode_number": episode.episode_number,
                        "start": episode.start,
                        "content": episode.content,
                        "summary": episode.summary,
                        "characters": episode.characters,
                    }
                    for episode in script_structure.episodes
                ],
            }
            
            # Write human-readable JSON (keep non-ASCII characters as-is).
            json_path = output_dir / f"{script_name}_analysis.json"
            with open(json_path, 'w', encoding='utf-8') as f:
                json.dump(analysis_data, f, ensure_ascii=False, indent=2)
            
            print(f"💾 保存分析结果到: {json_path}")
            
        except Exception as e:
            print(f"❌ 保存JSON文件时出错: {e}")
    
    async def _generate_result(self, state: ScriptAnalysisAgentState) -> ScriptAnalysisAgentState:
        """Assemble the final output object, persist it, and log a summary.

        ``state["result"]`` becomes None when no structure is available or any
        step raises.
        """
        try:
            structure = state.get("script_structure")
            
            # Guard: upstream analysis failed — nothing to report.
            if not structure:
                print("❌ 没有可用的剧本结构数据")
                state["result"] = None
                return state
            
            # The script name is the input file's stem.
            script_name = Path(state["script_input"]).stem
            
            state["result"] = ScriptAnalysisAgentOutput(
                script_name=script_name,
                script_structure=structure,
            )
            
            # Only persist when at least one episode was extracted.
            if structure.episodes:
                await self._save_analysis_to_json(structure, script_name)
            
            print("✅ 剧本分析完成")
            print("📊 分析结果:")
            print(f"  📖 剧本名: {script_name}")
            print(f"  📚 集数: {len(structure.episodes)}")
            print(f"  👥 角色数: {len(structure.main_characters)}")
            print(f"  📝 故事大纲: {structure.outline[:100]}...")
            
        except Exception as e:
            print(f"❌ 生成结果时出错: {e}")
            state["result"] = None
        return state
    
    async def run_analysis(self, script_input: str) -> ScriptAnalysisAgentOutput:
        """Entry point: run the whole workflow for one script file path.

        Returns the final ScriptAnalysisAgentOutput (None when any stage
        failed and produced no result).
        """
        # Seed the workflow state; every downstream node fills in its slot.
        initial_state: ScriptAnalysisAgentState = {
            "script_input": script_input,
            "script_content": "",
            "script_structure": None,
            "result": None,
        }
        
        print(f"🚀 开始分析剧本: {script_input}")
        
        # Drive the compiled LangGraph pipeline to completion.
        final_state = await self.app.ainvoke(initial_state)
        
        return final_state["result"]
