import concurrent.futures
import hashlib
import json
import os
import shutil
from datetime import datetime
from typing import Any, Dict, List

import requests

from config import config
from models.index_tts import generate_tts
from models.openai_api_image import generate_image_url
from models.prompt_gen import generate_text
from static_configuration import picture_style, scene_description
from utils.consistency_manager import ConsistencyManager
from utils.consistency_validator import ConsistencyValidator
from utils.data_persistence import save_paragraphs_as_text, save_image_prompts_as_text, create_file_index
from utils.file_handlers import download_image, cleanup_temp_files
from utils.subtitles import generate_combined_srt
from utils.text_processing import split_chapter_into_paragraphs
from utils.threading_utils import thread_safe_print
from utils.video_processing import create_video_from_audio_image


def generate_image_prompt(paragraph: str, api_key: str, consistency_manager: ConsistencyManager = None, paragraph_index: int = 0) -> str:
    """Generate an image-generation prompt for one paragraph.

    When a ConsistencyManager is supplied, an LLM-generated prompt is
    preferred if it is substantial (> 50 chars) and already embeds the
    configured picture style; otherwise the manager's structured,
    consistency-preserving prompt is used. If the manager path raises,
    or no manager is given, falls back to a plain LLM call.

    Args:
        paragraph: Source text of the paragraph.
        api_key: API key forwarded to the LLM backend.
        consistency_manager: Optional manager enforcing cross-segment
            visual consistency (may be None).
        paragraph_index: Zero-based paragraph index within the chapter,
            consumed by the consistency manager.

    Returns:
        The image prompt string.
    """

    def _llm_prompt() -> str:
        # Single place for the plain LLM call so the primary path and
        # the fallback cannot drift apart.
        return generate_text(
            paragraph,
            api_key,
            base_url=config.llm.base_url,
            model=config.llm.model,
            temperature=config.llm.temperature,
            max_tokens=config.llm.max_tokens,
            system_prompt=config.llm.system_prompt
        )

    if consistency_manager:
        try:
            # LLM-generated base prompt.
            base_prompt = _llm_prompt()
            # Structured prompt from the consistency manager.
            consistent_prompt = consistency_manager.generate_consistent_prompt(paragraph, paragraph_index)

            # Prefer the LLM prompt when it looks substantial and already
            # carries the required picture style; otherwise use the
            # manager's consistency-preserving prompt.
            if len(base_prompt.strip()) > 50 and picture_style in base_prompt:
                return base_prompt
            return consistent_prompt

        except Exception as e:
            # thread_safe_print: this function is called from worker
            # threads (see process_single_paragraph).
            thread_safe_print(f"⚠️ 一致性管理器失败，使用默认方式: {e}")

    # No manager, or the manager path failed: plain LLM generation.
    return _llm_prompt()


def process_single_paragraph(paragraph_data: Dict[str, Any]) -> Dict[str, Any]:
    """Process one paragraph end-to-end: prompt -> image -> audio -> video.

    ``paragraph_data`` must contain: 'paragraph', 'index' (zero-based),
    'output_dir', 'chapter_hash', 'animation_type', 'temp_files' (shared
    list that generated file paths are appended to), 'api_key' and,
    optionally, 'consistency_manager'.

    Returns:
        A dict with 'success' and 'index'. On success it also carries
        'segment_path' (the video file used for the final merge),
        'segment_directory' and 'segment_data_path'; on failure it
        carries 'error' and a None 'segment_path'.
    """
    paragraph = paragraph_data['paragraph']
    i = paragraph_data['index']
    output_dir = paragraph_data['output_dir']
    chapter_hash = paragraph_data['chapter_hash']
    animation_type = paragraph_data['animation_type']
    temp_files = paragraph_data['temp_files']
    api_key = paragraph_data['api_key']

    # Each paragraph gets its own sub-directory for all derived assets.
    segment_dir = os.path.join(output_dir, f"segment_{i + 1:02d}")
    os.makedirs(segment_dir, exist_ok=True)

    def _save_segment_json(data: Dict[str, Any], path: str) -> None:
        # Persist segment metadata as human-readable UTF-8 JSON.
        with open(path, 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=2)

    def _failure(message: str) -> Dict[str, Any]:
        # Uniform shape for every failure result.
        return {
            "success": False,
            "index": i,
            "error": message,
            "segment_path": None
        }

    try:
        thread_safe_print(f"🎬 [段落 {i + 1}] 开始处理: {paragraph[:50]}...")

        thread_safe_print(f"🎨 [段落 {i + 1}] 正在生成图片提示词...")
        try:
            # Optional consistency manager keeps prompts visually
            # coherent across segments.
            consistency_manager = paragraph_data.get('consistency_manager')
            image_prompt = generate_image_prompt(paragraph, api_key, consistency_manager, i)
            thread_safe_print(f"✅ [段落 {i + 1}] 图片提示词生成成功: {image_prompt[:100]}...")

            # Segment metadata; kept in memory and re-dumped as new
            # artefacts are produced (no need to re-read from disk).
            segment_data = {
                "index": i + 1,
                "paragraph": paragraph,
                "image_prompt": image_prompt,
                "timestamp": datetime.now().isoformat()
            }

            segment_json_path = os.path.join(segment_dir, f"segment_{chapter_hash}_{i}.json")
            _save_segment_json(segment_data, segment_json_path)

            # Raw paragraph text, for inspection/debugging.
            segment_text_path = os.path.join(segment_dir, f"paragraph_{i + 1:02d}.txt")
            with open(segment_text_path, 'w', encoding='utf-8') as f:
                f.write(f"# 段落 {i + 1}\n\n")
                f.write(paragraph)

            # The image-generation prompt, for inspection/debugging.
            prompt_text_path = os.path.join(segment_dir, f"prompt_{i + 1:02d}.txt")
            with open(prompt_text_path, 'w', encoding='utf-8') as f:
                f.write(f"# 段落 {i + 1} - 生图提示词\n\n")
                f.write(image_prompt)

            temp_files.extend([segment_json_path, segment_text_path, prompt_text_path])
            thread_safe_print(f"💾 [段落 {i + 1}] 段落数据已保存: {segment_dir}")

            thread_safe_print(f"🖼️ [段落 {i + 1}] 正在调用图片生成API...")
            image_url = generate_image_url(
                image_prompt,
                api_key=api_key,
                model=config.image_gen.model,
                size=config.image_gen.size,
                base_url=config.image_gen.base_url
            )
            thread_safe_print(f"✅ [段落 {i + 1}] 图片生成成功")

            # Download the generated image into the segment directory.
            image_filename = os.path.join(segment_dir, f"image_{i + 1:02d}.jpg")
            thread_safe_print(f"📥 [段落 {i + 1}] 正在下载图片...")
            image_path = download_image(image_url, image_filename)
            temp_files.append(image_path)
            thread_safe_print(f"✅ [段落 {i + 1}] 图片下载完成: {image_path}")

            # Record the image artefacts in the segment metadata.
            segment_data["image_path"] = image_path
            segment_data["image_url"] = image_url
            segment_data["segment_directory"] = segment_dir
            _save_segment_json(segment_data, segment_json_path)
            thread_safe_print(f"💾 [段落 {i + 1}] 图片信息已保存到段落数据")

        except Exception as e:
            thread_safe_print(f"❌ [段落 {i + 1}] 图片生成失败: {e}")
            return _failure(f"图片生成失败: {str(e)}")

        try:
            thread_safe_print(f"🎵 [段落 {i + 1}] 正在生成音频，文本长度: {len(paragraph)} 字符")

            # Synthesize narration audio into the segment directory.
            audio_filename = os.path.join(segment_dir, f"audio_{i + 1:02d}.wav")
            thread_safe_print(f"🎤 [段落 {i + 1}] 正在调用TTS API...")
            audio_path = generate_tts(
                paragraph,
                audio_filename,
                api_url=config.tts.api_url,
                voice_type=config.tts.default_voice_type,
                voice_mapping=config.tts.voice_mapping
            )
            temp_files.append(audio_path)
            thread_safe_print(f"✅ [段落 {i + 1}] 音频生成成功")

            # Render the segment video inside the segment directory, then
            # copy it to the chapter root where the merge step expects it.
            video_filename = os.path.join(segment_dir, f"video_{i + 1:02d}.mp4")
            video_filename_root = os.path.join(output_dir, f"segment_{chapter_hash}_{i}.mp4")
            thread_safe_print(f"🎞️ [段落 {i + 1}] 正在合成视频...")

            segment_path = create_video_from_audio_image(
                audio_path, image_path, video_filename, paragraph, animation_type
            )

            # shutil is imported at module level (top-of-file imports).
            shutil.copy2(segment_path, video_filename_root)
            segment_path_for_merge = video_filename_root

            thread_safe_print(f"✅ [段落 {i + 1}] 视频段生成成功")

            # Record the audio/video artefacts and completion time.
            segment_data["audio_path"] = audio_path
            segment_data["video_path"] = segment_path
            segment_data["video_path_for_merge"] = segment_path_for_merge
            segment_data["processing_completed"] = datetime.now().isoformat()
            _save_segment_json(segment_data, segment_json_path)
            thread_safe_print(f"💾 [段落 {i + 1}] 处理完成数据已保存")

            return {
                "success": True,
                "index": i,
                "segment_path": segment_path_for_merge,  # path used by the merge step
                "segment_directory": segment_dir,
                "segment_data_path": segment_json_path,
                "error": None
            }

        except requests.exceptions.RequestException as e:
            thread_safe_print(f"❌ [段落 {i + 1}] TTS API请求失败: {e}")
            return _failure(f"TTS API请求失败: {str(e)}")
        except Exception as e:
            thread_safe_print(f"❌ [段落 {i + 1}] 音频或视频生成失败: {e}")
            return _failure(f"音频或视频生成失败: {str(e)}")

    except Exception as e:
        thread_safe_print(f"❌ [段落 {i + 1}] 处理失败: {e}")
        return _failure(str(e))

def process_chapter(chapter_content: str, api_key: str, base_output_dir: str = "output", book_name: str = '', chapter_name: str = '') -> Dict[str, Any]:
    """Split a chapter into paragraphs and render it into a narrated video.

    Paragraphs are processed concurrently (prompt + image + TTS +
    per-segment video), then the segment videos are concatenated, an SRT
    subtitle file is produced, and consistency/processing reports are
    written into the timestamped output directory.

    Args:
        chapter_content: Full chapter text.
        api_key: ARK API key; required (an error report is returned when empty).
        base_output_dir: Root output directory; when both book_name and
            chapter_name are given, a book/chapter subtree is used.
        book_name: Optional book name used in the output path.
        chapter_name: Optional chapter name used in the output path.

    Returns:
        On success, the final processing report dict (includes
        'output_file', counts and generated file paths); on failure, an
        error report dict with 'success': False.
    """
    if book_name and chapter_name:
        # os.path.join keeps the path correct on every platform.
        base_output_dir = os.path.join(base_output_dir, book_name, chapter_name)
    print(f"ARK_API_KEY: {'已设置' if api_key else '未设置'}")
    print(f'初始输出目录{base_output_dir}')
    if not api_key:
        return {
            "success": False,
            "error": "ARK_API_KEY 不能为空",
            "segments_count": 0,
            "paragraphs_count": 0,
        }

    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    output_dir = os.path.join(base_output_dir, f"video_{timestamp}")
    os.makedirs(output_dir, exist_ok=True)
    print(f"输出目录: {output_dir}")
    print(f"生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")

    # Short content hash used to namespace all per-chapter artefacts.
    chapter_hash = hashlib.md5(chapter_content.encode()).hexdigest()[:8]
    paragraphs = split_chapter_into_paragraphs(chapter_content)

    # Persist the original chapter content and the paragraph split.
    chapter_data = {
        "timestamp": timestamp,
        "chapter_hash": chapter_hash,
        "original_content": chapter_content,
        "paragraphs": paragraphs,
        "paragraphs_count": len(paragraphs),
        "output_dir": output_dir
    }
    chapter_json_path = os.path.join(output_dir, f"chapter_{chapter_hash}.json")
    with open(chapter_json_path, 'w', encoding='utf-8') as f:
        json.dump(chapter_data, f, ensure_ascii=False, indent=2)
    print(f"📄 章节数据已保存: {chapter_json_path}")

    if not paragraphs:
        return {
            "success": False,
            "error": "章节内容为空或无法分割成段落",
            "segments_count": 0,
            "paragraphs_count": 0,
        }

    # Initialize the consistency manager; best-effort — on failure the
    # pipeline falls back to plain prompt generation.
    consistency_manager = ConsistencyManager()
    try:
        consistency_context = consistency_manager.analyze_chapter_context(chapter_content)
        consistency_manager.save_context(output_dir, chapter_hash)
        print(f"🎨 一致性上下文已初始化：")
        print(f"  - 主要角色: {', '.join(consistency_context.characters.keys())}")
        print(f"  - 世界设定: {consistency_context.world_setting}")
        print(f"  - 主要场景: {consistency_context.main_scene.location}")
    except Exception as e:
        print(f"⚠️ 一致性管理器初始化失败: {e}")
        consistency_manager = None

    # Save the paragraph split as plain text right away.
    paragraphs_file = save_paragraphs_as_text(paragraphs, output_dir, chapter_hash)
    print(f"📝 分段文字已保存: {paragraphs_file}")

    print(f"📊 开始并发处理 {len(paragraphs)} 个段落...")
    # Index-addressed so concurrent completion order doesn't matter.
    video_segments = [None] * len(paragraphs)
    image_prompts = [None] * len(paragraphs)
    temp_files = []

    paragraph_tasks = []
    for i, paragraph in enumerate(paragraphs):
        # Cycle through the configured animation effects.
        animation_type = config.processing.animation_effects[i % len(config.processing.animation_effects)]
        paragraph_data = {
            'paragraph': paragraph,
            'index': i,
            'output_dir': output_dir,
            'chapter_hash': chapter_hash,
            'animation_type': animation_type,
            'temp_files': temp_files,
            'api_key': api_key,
            'consistency_manager': consistency_manager
        }
        paragraph_tasks.append(paragraph_data)

    try:
        max_workers = min(config.processing.max_concurrent_workers, len(paragraphs))
        print(f"🚀 启动 {max_workers} 个并发工作线程")

        with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
            future_to_index = {
                executor.submit(process_single_paragraph, task_data): task_data['index']
                for task_data in paragraph_tasks
            }

            completed_count = 0
            failed_count = 0

            for future in concurrent.futures.as_completed(future_to_index):
                index = future_to_index[future]
                try:
                    result = future.result()
                    if result['success']:
                        video_segments[index] = result['segment_path']
                        # Recover the image prompt from the segment's
                        # metadata file.
                        if result.get('segment_data_path') and os.path.exists(result['segment_data_path']):
                            with open(result['segment_data_path'], 'r', encoding='utf-8') as f:
                                segment_data = json.load(f)
                                image_prompts[index] = segment_data.get('image_prompt', '')
                        completed_count += 1
                        thread_safe_print(f"🎉 [进度] {completed_count}/{len(paragraphs)} 个段落处理完成")
                    else:
                        failed_count += 1
                        thread_safe_print(f"⚠️ [段落 {index + 1}] 处理失败: {result['error']}")

                except Exception as e:
                    failed_count += 1
                    thread_safe_print(f"❌ [段落 {index + 1}] 处理异常: {e}")

            # Drop slots for failed paragraphs, preserving order.
            video_segments = [seg for seg in video_segments if seg is not None]

            print(f"\n📈 并发处理完成统计:")
            print(f"✅ 成功: {completed_count} 个段落")
            print(f"❌ 失败: {failed_count} 个段落")
            print(f"🎬 可用视频段: {len(video_segments)} 个")

        final_output = None
        if len(video_segments) > 1:
            from moviepy import concatenate_videoclips, VideoFileClip

            clips = [VideoFileClip(segment) for segment in video_segments]
            final_video = None
            try:
                final_video = concatenate_videoclips(clips)

                final_output = os.path.join(output_dir, f"chapter_{chapter_hash}_final.mp4")
                final_video.write_videofile(
                    final_output, fps=config.video.default_fps, audio_codec="aac", codec="libx264"
                )

                print("正在生成合并视频的SRT字幕文件...")
                combined_srt_content = generate_combined_srt(paragraphs, clips)
                combined_srt_filename = final_output.replace(".mp4", ".srt")
                with open(combined_srt_filename, "w", encoding="utf-8") as f:
                    f.write(combined_srt_content)
                print(f"合并SRT字幕文件已保存: {combined_srt_filename}")
            finally:
                # Always release moviepy resources, even if writing the
                # merged video or the subtitles fails.
                for clip in clips:
                    clip.close()
                if final_video is not None:
                    final_video.close()

            # Save the image prompts and run the consistency check.
            valid_prompts = [prompt for prompt in image_prompts if prompt]
            if valid_prompts:
                prompts_file = save_image_prompts_as_text(valid_prompts, output_dir, chapter_hash)
                print(f"🎨 生图提示词已保存: {prompts_file}")

                validator = ConsistencyValidator()
                issues = validator.validate_prompts(valid_prompts)
                consistency_report = validator.generate_consistency_report(issues)

                # Persist the consistency report.
                consistency_report_file = os.path.join(output_dir, f"consistency_report_{chapter_hash}.txt")
                with open(consistency_report_file, 'w', encoding='utf-8') as f:
                    f.write(f"图片一致性检查报告\n")
                    f.write(f"=" * 50 + "\n\n")
                    f.write(consistency_report)
                print(f"🔍 一致性检查报告已保存: {consistency_report_file}")

                # Brief summary on the console.
                if issues:
                    print(f"⚠️ 发现 {len(issues)} 个一致性问题，详细信息请查看: {consistency_report_file}")
                else:
                    print(f"✅ 一致性检查通过！")

        elif len(video_segments) == 1:
            # Single segment: no merge needed, use it directly.
            final_output = video_segments[0]

        # Build the final processing report.
        final_report = {
            "success": True,
            "output_file": final_output,
            "output_dir": output_dir,
            "timestamp": timestamp,
            "chapter_hash": chapter_hash,
            "segments_count": len(video_segments),
            "paragraphs_count": len(paragraphs),
            "completed_segments": completed_count,
            "failed_segments": failed_count,
            "processing_completed": datetime.now().isoformat(),
            "files_generated": {
                "chapter_data": chapter_json_path,
                "final_video": final_output,
                "srt_subtitle": combined_srt_filename if len(video_segments) > 1 else None,
                # Segment JSONs live inside each segment's own
                # sub-directory (see process_single_paragraph).
                "segment_data_files": [
                    os.path.join(output_dir, f"segment_{idx + 1:02d}", f"segment_{chapter_hash}_{idx}.json")
                    for idx in range(len(paragraphs))
                ],
                "individual_segments": video_segments
            }
        }

        report_path = os.path.join(output_dir, f"processing_report_{chapter_hash}.json")
        with open(report_path, 'w', encoding='utf-8') as f:
            json.dump(final_report, f, ensure_ascii=False, indent=2)
        print(f"📊 处理报告已保存: {report_path}")

        # Build an index of every generated file.
        index_path = create_file_index(output_dir, chapter_hash, final_report)
        print(f"📋 文件索引已创建: {index_path}")

        return final_report

    except Exception as e:
        error_report = {
            "success": False,
            "error": str(e),
            "output_dir": output_dir,
            "timestamp": timestamp,
            "chapter_hash": chapter_hash,
            "segments_count": len(video_segments),
            "paragraphs_count": len(paragraphs),
            "processing_failed": datetime.now().isoformat()
        }

        error_report_path = os.path.join(output_dir, f"error_report_{chapter_hash}.json")
        with open(error_report_path, 'w', encoding='utf-8') as f:
            json.dump(error_report, f, ensure_ascii=False, indent=2)
        print(f"❌ 错误报告已保存: {error_report_path}")

        return error_report

    finally:
        # Best-effort cleanup of intermediate artefacts.
        cleanup_temp_files(temp_files)
