"""
内容分析器模块
功能：对输入的长文本进行语义分析和要点提炼
"""

import json
from typing import Dict, List, Optional
from pathlib import Path

from ..base import register_tool, init_tool_instance
from ..utils.long_text_processor import LongTextProcessor, load_text_from_source


def detect_language(text: str) -> str:
    """Guess whether *text* is Chinese ("zh") or English ("en").

    Inspects only the first 500 characters and classifies the text as
    Chinese when CJK characters account for more than 30% of the counted
    characters (alphanumerics plus common Chinese punctuation). Empty
    input defaults to English.
    """
    import re

    if not text:
        return "en"

    window = text[:500]
    cjk_count = sum(1 for _ in re.finditer(r'[\u4e00-\u9fff]', window))
    punctuation = '，。！？《》；："\'（）【】「」'
    counted = sum(1 for ch in window if ch.isalnum() or ch in punctuation)

    if counted and cjk_count / counted > 0.3:
        return "zh"
    return "en"


@register_tool("content_analyzer")
class ContentAnalyzer:
    """Content analyzer: performs a deep analysis of input text.

    - Theme modeling (identify core themes)
    - Sentiment analysis (overall emotional tone)
    - Key-point extraction
    - Narrative-structure suggestion (for video scripting)

    The analysis is delegated to an LLM tool (``self.llm_type``); when the
    LLM call or JSON parsing fails, a neutral fallback result is returned
    instead of raising.
    """

    def __init__(self, cfg: Dict):
        """Initialize the analyzer.

        Args:
            cfg: configuration dict. Recognized keys:
                - "llm": registered tool name of the LLM backend (default "qwen")
                - "api_key": optional API key forwarded to the LLM tool
                - "temperature": sampling temperature (default 0.3; kept low
                  because analysis should be stable and factual)
        """
        self.cfg = cfg
        self.llm_type = cfg.get("llm", "qwen")
        self.api_key = cfg.get("api_key")
        self.temperature = cfg.get("temperature", 0.3)  # low temperature for analysis tasks

    def analyze(self, text):
        """Analyze *text*, returning the result dict or ``None`` on failure.

        Thin wrapper around :meth:`analyze_content` that additionally checks
        the result contains every required top-level field.

        Fix: the previous implementation called a nonexistent
        ``self.llm_analyze`` and validated Chinese field names that no other
        method ever produced, so it always failed and returned ``None``. It
        now delegates to :meth:`analyze_content` and validates the fields
        that method actually returns.
        """
        try:
            result = self.analyze_content(text)

            # The analysis is only useful if every top-level section exists.
            required_fields = ['themes', 'sentiment', 'key_points', 'narrative_structure']
            missing = [f for f in required_fields if f not in result]

            if not missing:
                print("✅ 分析任务完成")
                return result

            print("❌ 分析不完整，缺少字段:", missing)
            return None

        except Exception as e:
            print(f"❌ 分析失败: {e}")
            return None

    def _get_llm_cfg(self, system_prompt):
        """Build the config dict passed to the LLM tool instance."""
        cfg = {
            "system_prompt": system_prompt,
            "track_history": False  # each analysis call is independent
        }
        if self.api_key:
            cfg["api_key"] = self.api_key
        return cfg

    def analyze_content(self, text: str, max_length: int = 5000) -> Dict:
        """Analyze *text* and return a structured result.

        Args:
            text: input text.
            max_length: maximum number of characters sent to the LLM
                (guards against exceeding the model's token limit).

        Returns:
            Analysis dict containing:
            - themes: list of core themes
            - sentiment: {"overall", "description"} sentiment summary
            - key_points: list of key points
            - narrative_structure: suggested narrative structure
            - language: detected language ("zh" or "en")
            - original_length: length of the full (untruncated) input
        """
        # Detect the language first so we can pick matching prompts.
        language = detect_language(text)

        # Truncate the text used for analysis (token-limit guard); slicing
        # is a no-op when the text is already short enough.
        analysis_text = text[:max_length]

        print(f"\n{'='*60}")
        print(f"📊 内容分析中... (语言: {'中文' if language == 'zh' else '英文'})")
        print(f"{'='*60}")

        # Select prompts in the detected language.
        if language == "zh":
            system_prompt = self._get_chinese_system_prompt()
            analysis_prompt = self._get_chinese_analysis_prompt(analysis_text)
        else:
            system_prompt = self._get_english_system_prompt()
            analysis_prompt = self._get_english_analysis_prompt(analysis_text)

        # Instantiate the LLM tool for this analysis run.
        analyzer = init_tool_instance({
            "tool": self.llm_type,
            "cfg": self._get_llm_cfg(system_prompt)
        })

        # Run the LLM; any failure degrades to the fallback analysis.
        try:
            result, success = analyzer.call(
                analysis_prompt,
                max_length=2048,
                temperature=self.temperature
            )

            if success:
                # Parse the JSON reply and attach bookkeeping fields.
                result_dict = self._parse_analysis_result(result, language)
                result_dict["language"] = language
                result_dict["original_length"] = len(text)
                return result_dict

            print("⚠️  分析失败，返回基础分析")
            return self._get_fallback_analysis(text, language)

        except Exception as e:
            print(f"❌ 分析出错: {e}")
            return self._get_fallback_analysis(text, language)

    def _get_chinese_system_prompt(self) -> str:
        """System prompt for Chinese-language analysis."""
        return """你是一位专业的内容分析专家，擅长对文本进行深度语义分析。
你的任务是：
1. 识别文本的核心主题（3-5个）
2. 分析整体情感基调和情感变化
3. 提炼关键要点（5-8个核心信息）
4. 建议视频叙事结构（如何划分段落）

输出必须是有效的 JSON 格式，包含以下字段：
{
    "themes": ["主题1", "主题2", ...],
    "sentiment": {
        "overall": "积极/中性/消极",
        "description": "详细描述"
    },
    "key_points": ["要点1", "要点2", ...],
    "narrative_structure": {
        "style": "叙事风格建议",
        "segments": ["段落1描述", "段落2描述", ...]
    }
}"""

    def _get_english_system_prompt(self) -> str:
        """System prompt for English-language analysis."""
        return """You are a professional content analysis expert specializing in deep semantic analysis.
Your tasks:
1. Identify core themes (3-5)
2. Analyze overall sentiment and emotional arc
3. Extract key points (5-8 core pieces of information)
4. Suggest video narrative structure (how to divide into segments)

Output must be valid JSON format with the following fields:
{
    "themes": ["theme1", "theme2", ...],
    "sentiment": {
        "overall": "positive/neutral/negative",
        "description": "detailed description"
    },
    "key_points": ["point1", "point2", ...],
    "narrative_structure": {
        "style": "suggested narrative style",
        "segments": ["segment1 description", "segment2 description", ...]
    }
}"""

    def _get_chinese_analysis_prompt(self, text: str) -> str:
        """User prompt for Chinese-language analysis of *text*."""
        return f"""请深度分析以下文本内容：

文本：
{text}

请按照以下要求进行分析：

1. 主题识别：提取3-5个核心主题，每个主题用2-5个字概括
2. 情感分析：
   - overall: 整体情感倾向（积极/中性/消极）
   - description: 详细描述情感特点和变化（50-100字）
3. 关键要点：提炼5-8个核心信息点，每个要点用一句话表达（15-30字）
4. 叙事结构建议：
   - style: 建议的视频叙事风格（如：纪录片风格、故事叙述、教程讲解等）
   - segments: 建议如何划分为3-5个视频段落，每个段落的核心内容

输出格式：纯 JSON，不要任何其他文字。
JSON:"""

    def _get_english_analysis_prompt(self, text: str) -> str:
        """User prompt for English-language analysis of *text*."""
        return f"""Please conduct a deep analysis of the following text:

Text:
{text}

Analysis requirements:

1. Theme Identification: Extract 3-5 core themes, each summarized in 2-5 words
2. Sentiment Analysis:
   - overall: Overall sentiment (positive/neutral/negative)
   - description: Detailed description of emotional characteristics and changes (50-100 words)
3. Key Points: Extract 5-8 core pieces of information, each expressed in one sentence (15-30 words)
4. Narrative Structure Suggestion:
   - style: Suggested video narrative style (e.g., documentary, storytelling, tutorial)
   - segments: Suggest how to divide into 3-5 video segments, core content of each segment

Output format: Pure JSON only, no other text.
JSON:"""

    def _parse_analysis_result(self, result: str, language: str) -> Dict:
        """Parse the LLM's JSON reply, tolerating markdown fences and chatter.

        Tries the cleaned reply first; if that fails, retries on the
        outermost ``{...}`` span (LLMs often wrap the JSON in explanatory
        text despite instructions not to). Falls back to
        :meth:`_get_fallback_analysis` when no valid JSON can be recovered.
        """
        # Strip possible markdown code fences (``` or ```json).
        result = result.strip().strip('`').strip()
        if result.startswith('json'):
            result = result[4:].strip()

        candidates = [result]
        start, end = result.find('{'), result.rfind('}')
        if start != -1 and end > start:
            candidates.append(result[start:end + 1])

        last_error = None
        for candidate in candidates:
            try:
                return json.loads(candidate)
            except json.JSONDecodeError as e:
                last_error = e

        print(f"⚠️  JSON 解析失败: {last_error}")
        print(f"原始输出: {result[:200]}...")
        return self._get_fallback_analysis("", language)

    def _get_fallback_analysis(self, text: str, language: str) -> Dict:
        """Return a neutral placeholder analysis (used when the LLM fails)."""
        if language == "zh":
            return {
                "themes": ["内容分析", "信息提炼", "视频生成"],
                "sentiment": {
                    "overall": "中性",
                    "description": "文本内容较为客观，情感倾向不明显"
                },
                "key_points": [
                    "文本已加载并准备处理",
                    "将根据内容生成视频脚本",
                    "保持内容的核心信息完整性"
                ],
                "narrative_structure": {
                    "style": "叙事风格",
                    "segments": ["引言", "主体内容", "总结"]
                },
                "language": language,
                "original_length": len(text)
            }
        else:
            return {
                "themes": ["Content Analysis", "Information Extraction", "Video Generation"],
                "sentiment": {
                    "overall": "neutral",
                    "description": "Text content is relatively objective with no obvious emotional bias"
                },
                "key_points": [
                    "Text has been loaded and ready for processing",
                    "Will generate video script based on content",
                    "Maintain integrity of core information"
                ],
                "narrative_structure": {
                    "style": "narrative style",
                    "segments": ["Introduction", "Main Content", "Conclusion"]
                },
                "language": language,
                "original_length": len(text)
            }

    def display_analysis(self, analysis: Dict):
        """Pretty-print an analysis dict, localized to its detected language."""
        lang = analysis.get("language", "en")

        if lang == "zh":
            print(f"\n{'='*60}")
            print("📊 内容分析报告")
            print(f"{'='*60}")

            print(f"\n📚 核心主题:")
            for i, theme in enumerate(analysis.get("themes", []), 1):
                print(f"  {i}. {theme}")

            print(f"\n💭 情感分析:")
            sentiment = analysis.get("sentiment", {})
            print(f"  整体倾向: {sentiment.get('overall', 'N/A')}")
            print(f"  详细描述: {sentiment.get('description', 'N/A')}")

            print(f"\n🎯 关键要点:")
            for i, point in enumerate(analysis.get("key_points", []), 1):
                print(f"  {i}. {point}")

            print(f"\n🎬 叙事结构建议:")
            structure = analysis.get("narrative_structure", {})
            print(f"  推荐风格: {structure.get('style', 'N/A')}")
            print(f"  段落划分:")
            for i, seg in enumerate(structure.get("segments", []), 1):
                print(f"    {i}. {seg}")

            print(f"\n{'='*60}")
        else:
            print(f"\n{'='*60}")
            print("📊 Content Analysis Report")
            print(f"{'='*60}")

            print(f"\n📚 Core Themes:")
            for i, theme in enumerate(analysis.get("themes", []), 1):
                print(f"  {i}. {theme}")

            print(f"\n💭 Sentiment Analysis:")
            sentiment = analysis.get("sentiment", {})
            print(f"  Overall: {sentiment.get('overall', 'N/A')}")
            print(f"  Description: {sentiment.get('description', 'N/A')}")

            print(f"\n🎯 Key Points:")
            for i, point in enumerate(analysis.get("key_points", []), 1):
                print(f"  {i}. {point}")

            print(f"\n🎬 Narrative Structure Suggestion:")
            structure = analysis.get("narrative_structure", {})
            print(f"  Recommended Style: {structure.get('style', 'N/A')}")
            print(f"  Segment Division:")
            for i, seg in enumerate(structure.get("segments", []), 1):
                print(f"    {i}. {seg}")

            print(f"\n{'='*60}")

    def save_analysis(self, analysis: Dict, output_path: Path):
        """Write the analysis dict to *output_path* as UTF-8 JSON."""
        output_path = Path(output_path)
        output_path.parent.mkdir(parents=True, exist_ok=True)

        with open(output_path, 'w', encoding='utf-8') as f:
            json.dump(analysis, f, ensure_ascii=False, indent=2)

        print(f"✅ 分析结果已保存: {output_path}")

    def call(self, params: Dict) -> Dict:
        """Main tool entry point.

        Args:
            params: dict with exactly one text source plus options:
                - "text": raw text to analyze, or
                - "file_path": path of a file to load, or
                - "dir_path": path of a directory to load
                - "output_path": where to save the result (optional)
                - "display": whether to pretty-print the result (default True)

        Returns:
            The analysis dict from :meth:`analyze_content`.

        Raises:
            ValueError: when none of the text sources is provided.
        """
        # Load text from whichever source was provided. file_path and
        # dir_path go through the same loader, so they share one branch.
        if params.get("text"):
            text = params["text"]
        elif params.get("file_path") or params.get("dir_path"):
            source = params.get("file_path") or params.get("dir_path")
            docs = load_text_from_source(Path(source))
            text = "\n\n".join(doc.content for doc in docs)
        else:
            raise ValueError("请提供 text、file_path 或 dir_path 之一")

        # Analyze, then optionally display and persist the result.
        analysis = self.analyze_content(text)

        if params.get("display", True):
            self.display_analysis(analysis)

        if params.get("output_path"):
            self.save_analysis(analysis, params["output_path"])

        return analysis