import ast
import json
import random
import re
from pathlib import Path
from typing import Dict, List, Union

from tqdm import trange, tqdm

from ..utils.llm_output_check import parse_list
from ..utils.long_text_processor import LongTextProcessor, load_text_from_source, Document
from ..base import register_tool, init_tool_instance
from ..prompts_en import question_asker_system, expert_system, \
    dlg_based_writer_system, dlg_based_writer_prompt, chapter_writer_system


def detect_language(text: str) -> str:
    """
    Detect whether the text is predominantly Chinese or English.

    Only the first 500 characters are sampled. A text counts as Chinese
    when CJK characters make up more than 30% of the counted characters
    (alphanumerics plus common CJK punctuation).

    Args:
        text: input text

    Returns:
        "zh" or "en" (defaults to "en" for empty input)
    """
    if not text:
        return "en"

    sample = text[:500]

    cjk_count = len(re.findall(r'[\u4e00-\u9fff]', sample))
    counted = sum(
        1 for ch in sample
        if ch.isalnum() or ch in '，。！？、；：""''（）【】《》'
    )

    is_chinese = counted > 0 and cjk_count / counted > 0.3
    return "zh" if is_chinese else "en"


def json_parse_outline(outline):
    """
    Validate that an LLM response is a well-formed story outline.

    Expected JSON shape:
        {"story_title": ..., "story_outline": [
            {"chapter_title": ..., "chapter_summary": ...}, ...]}

    Args:
        outline: raw LLM output, possibly wrapped in a ```json code fence.

    Returns:
        True if the text parses to the expected structure, else False.
    """
    # Remove a Markdown code fence if present. The previous version used
    # str.strip("```json"), which strips a *character set* (`, j, s, o, n)
    # from both ends rather than a prefix/suffix, and could eat legitimate
    # leading/trailing characters.
    text = outline.strip()
    if text.startswith("```"):
        text = text[3:]
        if text.startswith("json"):
            text = text[4:]
    if text.endswith("```"):
        text = text[:-3]
    text = text.strip()

    try:
        data = json.loads(text)
    except json.JSONDecodeError:
        return False
    if not isinstance(data, dict):
        return False
    if data.keys() != {"story_title", "story_outline"}:
        return False
    if not isinstance(data["story_outline"], list):
        return False
    for chapter in data["story_outline"]:
        # Fix: a non-dict chapter used to raise AttributeError on .keys();
        # an invalid shape should simply fail the check.
        if not isinstance(chapter, dict):
            return False
        if chapter.keys() != {"chapter_title", "chapter_summary"}:
            return False
    return True


@register_tool("qa_outline_story_writer")
class QAOutlineStoryWriter:

    def __init__(self, cfg: Dict):
        """
        Store the tool configuration and cache frequently used settings.

        Recognized cfg keys: "temperature", "max_conv_turns", "num_outline",
        "llm" (tool name for the LLM backend) and "api_key".
        """
        self.cfg = cfg
        get = cfg.get
        self.temperature = get("temperature", 1.0)
        self.max_conv_turns = get("max_conv_turns", 3)
        self.num_outline = get("num_outline", 4)
        self.llm_type = get("llm", "qwen")
        # The API key is optional; None means the LLM tool's default applies.
        self.api_key = get("api_key")

    def _get_llm_cfg(self, system_prompt, track_history=False):
        """Build an LLM tool config dict, attaching the API key when one is set."""
        llm_cfg = {
            "system_prompt": system_prompt,
            "track_history": track_history,
        }
        if self.api_key:
            llm_cfg["api_key"] = self.api_key
        return llm_cfg
    
    def generate_outline(self, params):
        """
        Generate a story outline through a simulated expert interview.

        `params` is the story setting, e.g.
        {
            "story_title": "xxx",
            "main_role": "xxx",
            ......
        }

        Returns:
            dict with keys "story_title" and "story_outline" (shape enforced
            by `json_parse_outline`).
        """
        asker = init_tool_instance({
            "tool": self.llm_type,
            "cfg": self._get_llm_cfg(question_asker_system, track_history=False)
        })
        expert = init_tool_instance({
            "tool": self.llm_type,
            "cfg": self._get_llm_cfg(expert_system, track_history=False)
        })

        # Multi-turn Q&A: the asker probes the setting, the expert answers.
        dialogue = []
        for turn in trange(self.max_conv_turns):
            dialogue_history = "\n".join(dialogue)

            question, success = asker.call(
                f"Story setting: {params}\nDialogue history: \n{dialogue_history}\n",
                temperature=self.temperature
            )
            question = question.strip()
            # The asker emits this exact phrase when it has no more questions.
            if question == "Thank you for your help!":
                break
            dialogue.append(f"You: {question}")
            answer, success = expert.call(
                f"Story setting: {params}\nQuestion: \n{question}\nAnswer: ",
                temperature=self.temperature
            )
            answer = answer.strip()
            dialogue.append(f"Expert: {answer}")

        writer = init_tool_instance({
            "tool": self.llm_type,
            "cfg": self._get_llm_cfg(dlg_based_writer_system, track_history=False)
        })
        writer_prompt = dlg_based_writer_prompt.format(
            story_setting=params,
            dialogue_history="\n".join(dialogue),
            num_outline=self.num_outline
        )

        # Fix: pass temperature (it was omitted here, unlike every other LLM
        # call in this class) and retry on failure instead of feeding an
        # invalid response straight into json.loads.
        outline, success = writer.call(
            writer_prompt,
            temperature=self.temperature,
            success_check_fn=json_parse_outline
        )
        while success is False:
            outline, success = writer.call(
                writer_prompt,
                seed=random.randint(0, 100000),
                temperature=self.temperature,
                success_check_fn=json_parse_outline
            )

        # Fix: json_parse_outline accepts output wrapped in a ```json fence,
        # so strip the fence here too before parsing the raw text.
        text = outline.strip()
        if text.startswith("```"):
            text = text[3:]
            if text.startswith("json"):
                text = text[4:]
        if text.endswith("```"):
            text = text[:-3]
        return json.loads(text.strip())

    def generate_story_from_outline(self, outline):
        """
        Expand an outline into story pages, chapter by chapter.

        Args:
            outline: dict with "story_outline", a list of chapter dicts
                ("chapter_title" / "chapter_summary").

        Returns:
            list[str]: all generated pages, in chapter order.

        Raises:
            RuntimeError: if a chapter keeps failing after the retry budget
                is exhausted (the previous version retried forever).
        """
        chapter_writer = init_tool_instance({
            "tool": self.llm_type,
            "cfg": self._get_llm_cfg(chapter_writer_system, track_history=False)
        })
        all_pages = []
        max_retries = 10  # bound the retry loop so a bad model cannot hang us
        for idx, chapter in enumerate(tqdm(outline["story_outline"])):
            # The writer sees everything written so far plus the current
            # chapter; the payload is identical across retries, so build once.
            payload = json.dumps(
                {
                    "completed_story": all_pages,
                    "current_chapter": chapter
                },
                ensure_ascii=False
            )
            chapter_detail, success = chapter_writer.call(
                payload,
                success_check_fn=parse_list,
                temperature=self.temperature
            )
            retries = 0
            while success is False:
                if retries >= max_retries:
                    raise RuntimeError(
                        f"Failed to generate chapter {idx} after {max_retries} retries"
                    )
                chapter_detail, success = chapter_writer.call(
                    payload,
                    seed=random.randint(0, 100000),
                    temperature=self.temperature,
                    success_check_fn=parse_list
                )
                retries += 1
            # Fix: ast.literal_eval instead of eval — the LLM output is
            # untrusted text and must not be executed as code.
            pages = [page.strip() for page in ast.literal_eval(chapter_detail)]
            all_pages.extend(pages)
        return all_pages

    def generate_story_from_long_text(self, params):
        """
        Generate story pages from long text, a file, or a directory, with
        support for large-scale document processing.

        `params` should contain:
        {
            "long_text": "long text..." or None,       # direct text input
            "file_path": "path/to/file.txt" or None,   # single file
            "dir_path": "path/to/directory" or None,   # directory (batch)
            "target_pages": 8,                         # desired page count
            "style": "narrative/tutorial/documentary", # optional video style
            "chunk_size": 2000,                        # chunk size (chars)
            "use_summarization": True,                 # summarize very long text
        }

        Returns:
            list[str] of exactly `target_pages` page descriptions (topped up
            or truncated as needed).

        Raises:
            ValueError: if none of long_text / file_path / dir_path is given.
        """
        target_pages = params.get("target_pages", 8)
        style = params.get("style", "narrative")
        chunk_size = params.get("chunk_size", 2000)
        use_summarization = params.get("use_summarization", True)
        max_tokens_per_chunk = params.get("max_tokens_per_chunk", 6000)

        # Step 1: load the text content.
        print("=" * 60)
        print("步骤1: 加载文本内容...")
        print("=" * 60)

        documents = []

        # File / directory paths take precedence over raw text input.
        if params.get("file_path"):
            file_path = Path(params["file_path"])
            print(f"从文件加载: {file_path}")
            documents = load_text_from_source(file_path)
        elif params.get("dir_path"):
            dir_path = Path(params["dir_path"])
            print(f"从目录加载: {dir_path}")
            documents = load_text_from_source(dir_path)
        elif params.get("long_text"):
            # Direct text input. (Fix: removed a LongTextProcessor that was
            # constructed here but never used — chunking happens in step 3.)
            long_text = params.get("long_text", "")
            print(f"使用直接文本输入 (长度: {len(long_text)} 字符)")
            documents = [Document(title="Input Text", content=long_text)]
        else:
            raise ValueError("请提供 long_text、file_path 或 dir_path 之一")

        print(f"已加载 {len(documents)} 个文档")
        for i, doc in enumerate(documents):
            print(f"  文档 {i+1}: {doc.title} ({len(doc.content)} 字符)")

        # Step 2: merge all document contents.
        all_text = "\n\n".join([doc.content for doc in documents])
        total_chars = len(all_text)
        print(f"\n总文本长度: {total_chars} 字符")

        # Detect the input language and stash it in params for later stages.
        detected_language = detect_language(all_text)
        params["detected_language"] = detected_language
        print(f"检测到输入文本语言: {'中文' if detected_language == 'zh' else '英文'}")

        # Step 3: decide whether the text needs to be chunked.
        processor = LongTextProcessor(chunk_size=chunk_size)
        should_chunk = processor.should_split(all_text, max_tokens=max_tokens_per_chunk)

        if should_chunk:
            print(f"\n文本过长，需要分块处理...")
            print(f"使用分块策略: chunk_size={chunk_size}, max_tokens={max_tokens_per_chunk}")

            try:
                if use_summarization:
                    # Summarization chain: summarize chunks, then paginate.
                    pages = self._generate_from_chunks_with_summarization(
                        all_text, target_pages, style, processor, max_tokens_per_chunk
                    )
                else:
                    # Direct chunking: paginate each chunk independently.
                    pages = self._generate_from_chunks_direct(
                        all_text, target_pages, style, processor, max_tokens_per_chunk
                    )
            except Exception as e:
                print(f"⚠️  分块处理失败: {e}，使用备用文本分割策略...")
                pages = self._fallback_split_text(all_text, target_pages)
        else:
            # Short enough to process in a single LLM call.
            print(f"\n文本长度适中，直接处理...")
            try:
                pages = self._generate_pages_from_text(all_text, target_pages, style)
            except Exception as e:
                print(f"⚠️  直接处理失败: {e}，使用备用文本分割策略...")
                pages = self._fallback_split_text(all_text, target_pages)

        # Top up with the fallback splitter if generation came up short.
        if len(pages) < target_pages:
            print(f"⚠️  生成的页面数量不足 ({len(pages)}/{target_pages})，使用备用策略补充...")
            additional_needed = target_pages - len(pages)
            fallback_pages = self._fallback_split_text(all_text, additional_needed)
            pages.extend(fallback_pages[:additional_needed])

        print(f"\n成功生成 {len(pages)} 个故事页面")
        return pages[:target_pages]  # never exceed the target count
    
    def _generate_pages_from_text(self, text: str, target_pages: int, style: str) -> List[str]:
        """
        Generate pages directly from text (for texts short enough for one
        LLM call).

        Falls back to `_fallback_split_text` on any LLM or parsing failure,
        so it always returns a list of page descriptions.
        """
        # Detect the input language to pick matching prompts.
        language = detect_language(text)
        print(f"检测到输入文本语言: {'中文' if language == 'zh' else '英文'}")

        # Select the system prompt and analysis prompt by language.
        if language == "zh":
            long_text_analyzer_system = """你是一位擅长分析长文本并将其转换为可视化故事脚本的专家。
你的任务是：
1. 分析输入文本，提取关键信息、主题和叙事元素
2. 将内容划分为适合视觉呈现的逻辑段落
3. 为每个段落生成简洁、生动的描述，便于图像生成
4. 确保段落之间的叙事连续性和连贯性"""
            
            analysis_prompt = f"""请分析以下文本，并将其划分为 {target_pages} 个故事页面用于视频生成。

输入文本：
{text}

重要要求：
- 必须覆盖文本中的所有关键情节和重要事件，不能遗漏任何重要内容
- 如果文本较长，确保均匀分布，每个部分都有对应的页面
- 保持时间顺序和叙事连贯性
- 风格：{style}
- 页面数量：{target_pages}
- 每个页面应该是一个独立的视觉场景
- 关注关键时刻、转折点或重要信息
- 每个页面描述应为2-3句话，生动且适合图像生成

输出格式：一个Python字符串列表，每个字符串是一个页面描述。
示例：["第1页描述...", "第2页描述...", ...]

只输出列表，不要其他文字："""
        else:
            long_text_analyzer_system = """You are an expert in analyzing long texts and converting them into visual story scripts.
Your task is to:
1. Analyze the input text and extract key information, themes, and narrative elements
2. Divide the content into logical segments suitable for visual presentation
3. Generate concise, vivid descriptions for each segment that can be visualized
4. Ensure narrative continuity and coherence across segments"""
            
            analysis_prompt = f"""Analyze the following text and divide it into {target_pages} story pages for video generation.

Input Text:
{text}

Important Requirements:
- MUST cover all key plot points and important events in the text, do not omit any important content
- If the text is long, ensure even distribution so each section has corresponding pages
- Maintain chronological order and narrative coherence
- Style: {style}
- Number of pages: {target_pages}
- Each page should be a self-contained visual scene
- Focus on key moments, turning points, or important information
- Each page description should be 2-3 sentences, vivid and suitable for image generation

Output format: A Python list of strings, where each string is one page description.
Example: ["Page 1 description...", "Page 2 description...", ...]

Output the list only, no other text:"""

        analyzer = init_tool_instance({
            "tool": self.llm_type,
            "cfg": self._get_llm_cfg(long_text_analyzer_system, track_history=False)
        })

        # Clamp the token budget to a sane range.
        max_llm_tokens = int(self.cfg.get("max_llm_tokens", 4096))
        max_llm_tokens = max(512, min(4096, max_llm_tokens))
        print(f"调用LLM生成 {target_pages} 个故事页面...")
        pages_result, success = analyzer.call(
            analysis_prompt,
            max_length=max_llm_tokens,
            success_check_fn=parse_list,
            temperature=self.temperature
        )
        
        # Retry with random seeds, at most 3 times.
        retry_count = 0
        while success is False and retry_count < 3:
            print(f"重试 {retry_count + 1}/3... (max_length 改为 {max_llm_tokens})")
            pages_result, success = analyzer.call(
                analysis_prompt,
                max_length=max_llm_tokens,
                seed=random.randint(0, 100000),
                temperature=self.temperature,
                success_check_fn=parse_list
            )
            retry_count += 1
        
        if not success or pages_result is None:
            print("LLM生成失败，使用备用文本分割策略...")
            return self._fallback_split_text(text, target_pages)
        
        try:
            # Make sure we are working with a non-empty string.
            pages_result_str = str(pages_result).strip() if pages_result else ""
            if not pages_result_str:
                print("LLM输出为空，使用备用文本分割策略...")
                return self._fallback_split_text(text, target_pages)
            
            # Fix: ast.literal_eval instead of eval — the LLM output is
            # untrusted text and must not be executed as code.
            pages = [page.strip() for page in ast.literal_eval(pages_result_str)]
            return pages[:target_pages] if len(pages) > target_pages else pages
        except Exception as e:
            print(f"解析LLM输出失败 ({e})，使用备用文本分割策略...")
            return self._fallback_split_text(text, target_pages)
    
    def _generate_from_chunks_direct(self, text: str, target_pages: int, style: str, 
                                     processor: LongTextProcessor, max_tokens: int) -> List[str]:
        """
        Direct chunking strategy: split the text into chunks, generate pages
        for each chunk, then concatenate.

        Returns at most `target_pages` pages, topping up from the fallback
        splitter when the per-chunk generation falls short.
        """
        print("\n使用直接分块处理策略...")
        
        # Split the text, preserving document structure where possible.
        chunks = processor.split_text_intelligent(text, preserve_structure=True)
        print(f"文本已分为 {len(chunks)} 个块")
        
        # Decide how many pages each chunk contributes.
        if len(chunks) > target_pages:
            # More chunks than pages: one page per chunk, truncated to the
            # target. (Fix: removed an unreachable redistribution branch —
            # target_pages - len(chunks) is always negative here — and an
            # unused min_pages_per_chunk variable.)
            print(f"⚠️  块数 ({len(chunks)}) 多于目标页数 ({target_pages})，将确保每个块至少有一个页面")
            pages_per_chunk_list = [1] * target_pages
        else:
            # Even split, with the remainder going to the first chunks.
            pages_per_chunk = max(1, target_pages // len(chunks))
            pages_per_chunk_list = [pages_per_chunk] * len(chunks)
            remaining_pages = target_pages - sum(pages_per_chunk_list)
            for i in range(remaining_pages):
                pages_per_chunk_list[i] += 1
        
        all_pages = []
        for i, chunk in enumerate(chunks):
            if len(all_pages) >= target_pages:
                break
            
            chunk_pages = pages_per_chunk_list[i] if i < len(pages_per_chunk_list) else 1
            # Never request more pages than are still needed overall.
            remaining_for_target = target_pages - len(all_pages)
            chunk_pages = min(chunk_pages, remaining_for_target)
            
            if chunk_pages <= 0:
                break
            
            print(f"\n处理块 {i+1}/{len(chunks)} (目标: {chunk_pages} 页，确保覆盖此块内容)...")
            
            try:
                chunk_pages_list = self._generate_pages_from_text(
                    chunk.content, chunk_pages, style
                )
                all_pages.extend(chunk_pages_list)
            except Exception as e:
                print(f"处理块 {i+1} 时出错: {e}，使用备用分割...")
                fallback = self._fallback_split_text(chunk.content, chunk_pages)
                all_pages.extend(fallback[:chunk_pages])
        
        # If still short, add one fallback page per chunk until satisfied.
        if len(all_pages) < target_pages:
            additional_needed = target_pages - len(all_pages)
            print(f"⚠️  页面数不足，从所有块中补充 {additional_needed} 页...")
            for i, chunk in enumerate(chunks):
                if len(all_pages) >= target_pages:
                    break
                additional = self._fallback_split_text(chunk.content, 1)
                if additional:
                    all_pages.extend(additional[:1])
        
        return all_pages[:target_pages]
    
    def _generate_from_chunks_with_summarization(self, text: str, target_pages: int, style: str,
                                                  processor: LongTextProcessor, max_tokens: int) -> List[str]:
        """
        Summarization-chain strategy: summarize each chunk first, then
        generate pages from the combined summary.

        Falls back to `_fallback_split_text` on short summaries or any LLM /
        parsing failure.
        """
        print("\n使用摘要链处理策略...")
        
        # Detect the input language to pick matching prompts.
        language = detect_language(text)
        print(f"检测到输入文本语言: {'中文' if language == 'zh' else '英文'}")
        
        # Step 1: chunk the text.
        chunks = processor.split_text_intelligent(text, preserve_structure=True)
        print(f"文本已分为 {len(chunks)} 个块")
        
        # Step 2: summarize each chunk with a language-matched prompt.
        if language == "zh":
            summarizer_system = """你是一位专业的文本摘要专家。你的任务是创建简洁的摘要，保留关键信息、主题和叙事流程。"""
        else:
            summarizer_system = """You are an expert summarizer. Your task is to create concise summaries 
that preserve key information, themes, and narrative flow."""
        
        summarizer = init_tool_instance({
            "tool": self.llm_type,
            "cfg": self._get_llm_cfg(summarizer_system, track_history=False)
        })
        
        summaries = []
        for i, chunk in enumerate(chunks):
            print(f"生成块 {i+1}/{len(chunks)} 的摘要...")
            
            # Use up to 5000 characters per chunk so key plot points survive.
            chunk_content = chunk.content[:5000] if len(chunk.content) > 5000 else chunk.content
            if language == "zh":
                summary_prompt = f"""请为以下文本生成一个详细的摘要，必须保留所有关键情节、重要事件和角色行为：

文本：
{chunk_content}

摘要要求：
- 必须保留所有关键情节和重要事件，不能遗漏任何重要内容
- 保留主要角色的行为和对话要点
- 保留场景转换和情节转折
- 保持叙事连贯性和时间顺序
- 长度控制在300-800字（根据内容重要性调整，重要内容多则更长）
- 如果文本包含多个场景或事件，必须全部包含在摘要中

摘要："""
            else:
                summary_prompt = f"""Please generate a detailed summary for the following text, preserving ALL key plot points, important events, and character actions:

Text:
{chunk_content}

Summary requirements:
- MUST preserve all key plot points and important events, do not omit any important content
- Preserve main character behaviors and dialogue highlights
- Preserve scene transitions and plot twists
- Maintain narrative coherence and chronological order
- Length: 300-800 words (adjust based on content importance, longer for more important content)
- If the text contains multiple scenes or events, ALL must be included in the summary

Summary:"""
            
            try:
                summary, success = summarizer.call(
                    summary_prompt,
                    max_length=1024,
                    temperature=0.5
                )
                # Guard against None or empty summaries.
                if success and summary:
                    summary_text = str(summary).strip() if summary else ""
                    if summary_text:
                        summaries.append(summary_text)
                    else:
                        print(f"⚠️  摘要为空，使用文本片段")
                        summaries.append(chunk.content[:500] + "...")
                else:
                    # On failure, fall back to the first 500 characters.
                    error_msg = "API 调用失败" if not success else "摘要为空"
                    print(f"⚠️  生成摘要失败 ({error_msg})，使用文本片段")
                    summaries.append(chunk.content[:500] + "...")
            except Exception as e:
                print(f"⚠️  生成摘要异常: {e}，使用文本片段")
                summaries.append(chunk.content[:500] + "...")
        
        # Step 3: merge summaries and generate pages from them.
        combined_summary = "\n\n".join(summaries)
        print(f"\n合并摘要完成 (总长度: {len(combined_summary)} 字符，共 {len(summaries)} 个块)")
        
        # A too-short summary is useless — fall back to naive splitting.
        if len(combined_summary) < 200:
            print("⚠️  摘要过短，使用原始文本的备用分割策略...")
            return self._fallback_split_text(text, target_pages)
        
        # Informational only: minimum pages per chunk if evenly distributed.
        min_pages_per_chunk = max(1, target_pages // len(chunks)) if chunks else 1
        print(f"每个块至少生成 {min_pages_per_chunk} 个页面，确保覆盖所有内容")
        
        try:
            # The prompt explicitly requires covering every key plot point.
            # Fix: the analyzer system prompt was hardcoded in Chinese even
            # for English input; it is now selected per detected language.
            if language == "zh":
                enhanced_prompt = f"""请分析以下合并后的摘要，生成 {target_pages} 个故事页面用于视频生成。

合并摘要（来自 {len(chunks)} 个文本块）：
{combined_summary}

重要要求：
- 必须覆盖摘要中的所有关键情节和重要事件，不能遗漏任何内容
- 每个文本块的内容都应该有对应的页面
- 如果某个块包含多个重要场景，应该生成多个页面
- 保持时间顺序和叙事连贯性
- 每个页面描述应为2-3句话，生动且适合图像生成
- 风格：{style}

输出格式：一个Python字符串列表，每个字符串是一个页面描述。
示例：["第1页描述...", "第2页描述...", ...]

只输出列表，不要其他文字："""
                analyzer_system = "你是一位擅长分析长文本并将其转换为可视化故事脚本的专家。你的任务是确保所有关键情节都被完整覆盖。"
            else:
                enhanced_prompt = f"""Analyze the following combined summary and generate {target_pages} story pages for video generation.

Combined Summary (from {len(chunks)} text chunks):
{combined_summary}

Important Requirements:
- MUST cover all key plot points and important events in the summary, do not omit any content
- Each text chunk should have corresponding pages
- If a chunk contains multiple important scenes, generate multiple pages
- Maintain chronological order and narrative coherence
- Each page description should be 2-3 sentences, vivid and suitable for image generation
- Style: {style}

Output format: A Python list of strings, where each string is one page description.
Example: ["Page 1 description...", "Page 2 description...", ...]

Output the list only, no other text:"""
                analyzer_system = ("You are an expert in analyzing long texts and converting them into "
                                   "visual story scripts. Your task is to ensure that all key plot points "
                                   "are fully covered.")
            
            analyzer = init_tool_instance({
                "tool": self.llm_type,
                "cfg": self._get_llm_cfg(analyzer_system, track_history=False)
            })
            
            # Clamp the token budget to a sane range.
            max_llm_tokens = int(self.cfg.get("max_llm_tokens", 4096))
            max_llm_tokens = max(512, min(4096, max_llm_tokens))
            
            pages_result, success = analyzer.call(
                enhanced_prompt,
                max_length=max_llm_tokens,
                success_check_fn=parse_list,
                temperature=self.temperature
            )
            
            if success and pages_result:
                try:
                    pages_result_str = str(pages_result).strip() if pages_result else ""
                    if pages_result_str:
                        # Fix: ast.literal_eval instead of eval — the LLM
                        # output is untrusted and must not be executed.
                        pages = [page.strip() for page in ast.literal_eval(pages_result_str)]
                        # Top up from the original text if too few pages.
                        if len(pages) < target_pages:
                            print(f"⚠️  生成的页面数量不足 ({len(pages)}/{target_pages})，从原始文本补充...")
                            additional_needed = target_pages - len(pages)
                            fallback_pages = self._fallback_split_text(text, additional_needed)
                            pages.extend(fallback_pages[:additional_needed])
                        return pages[:target_pages]
                except Exception as e:
                    print(f"⚠️  解析页面结果失败: {e}")
            
            # LLM generation failed: fall back to naive splitting.
            print("⚠️  LLM生成失败，使用备用分割策略...")
            return self._fallback_split_text(text, target_pages)
            
        except Exception as e:
            print(f"⚠️  从摘要生成页面失败: {e}，使用原始文本的备用分割策略...")
            return self._fallback_split_text(text, target_pages)

    def _fallback_split_text(self, text, target_pages):
        """
        Fallback: naive text splitting for when LLM generation fails.

        Splits by paragraphs, then sentences, then fixed-size character
        chunks, and pads by repeating the last page so that exactly
        `target_pages` pages come back (placeholder pages for empty input).
        """
        if not text or not text.strip():
            # No usable text at all: return placeholder pages.
            return [f"页面 {i+1}" for i in range(target_pages)]
        
        # Prefer paragraph boundaries.
        paragraphs = [p.strip() for p in text.split('\n\n') if p.strip()]
        
        if not paragraphs:
            # Fall back to sentence boundaries.
            paragraphs = [s.strip() + '.' for s in text.split('.') if s.strip()]
        
        if not paragraphs:
            # Last resort: fixed-size character chunks.
            char_per_page = max(100, len(text) // target_pages)
            paragraphs = []
            for i in range(0, len(text), char_per_page):
                chunk = text[i:i+char_per_page].strip()
                if chunk:
                    paragraphs.append(chunk)
        
        # Guarantee at least one paragraph from here on.
        if not paragraphs:
            paragraphs = [text[:500] + "..." if len(text) > 500 else text]
        
        if len(paragraphs) <= target_pages * 1.5:
            # Close enough to the target: take what we need directly.
            pages = paragraphs[:target_pages]
        else:
            # Too many paragraphs: merge consecutive ones into pages.
            chunk_size = max(1, len(paragraphs) // target_pages)
            pages = []
            for i in range(0, len(paragraphs), chunk_size):
                merged = ' '.join(paragraphs[i:i+chunk_size])
                if merged.strip():
                    pages.append(merged)
                if len(pages) >= target_pages:
                    break
        
        # Pad by repeating the last page. (Fix: removed two dead "pages is
        # empty" branches — paragraphs is guaranteed non-empty above, so
        # pages is non-empty whenever this loop runs with target_pages > 0.)
        while len(pages) < target_pages:
            pages.append(pages[-1])
        
        return pages[:target_pages]

    def call(self, params):
        """
        Entry point supporting three input modes:
        1. topic mode (original): topic -> outline -> pages
        2. long_text mode: file / directory / raw text -> pages (skips the outline)
        3. file mode: file_path / dir_path -> pages
        """
        banner = "=" * 60
        wants_long_text = (
            params.get("input_type", "topic") == "long_text"
            or bool(params.get("file_path"))
            or bool(params.get("dir_path"))
        )

        if wants_long_text:
            # Long-text mode: file, directory, or direct text input.
            print(banner)
            print("使用长文本模式: 文件/目录/文本 -> 故事页面")
            print(banner)
            return self.generate_story_from_long_text(params)

        # Original mode: topic -> outline -> pages.
        print(banner)
        print("使用主题模式: 主题 -> 大纲 -> 故事页面")
        print(banner)
        outline = self.generate_outline(params)
        return self.generate_story_from_outline(outline)