import concurrent.futures
import hashlib
import json
import os
import shutil
from datetime import datetime
from typing import Any, Dict, List

import requests

from config import config
from models.index_tts import generate_tts
from models.openai_api_image import generate_image_url
from models.prompt_gen import generate_text
from utils.data_persistence import save_paragraphs_as_text, save_image_prompts_as_text, create_file_index
from utils.file_handlers import download_image, cleanup_temp_files
from utils.subtitles import generate_combined_srt, generate_combined_srt_fast
from utils.text_processing import split_chapter_into_paragraphs
from utils.threading_utils import thread_safe_print
from utils.video_processing import create_video_from_audio_image, embed_srt_to_video, burn_srt_to_video


def generate_image_prompt(paragraph: str, api_key: str, paragraph_index: int = 0) -> str:
    """
    Ask the LLM to produce an image-generation prompt for one paragraph.

    Args:
        paragraph: Paragraph text to turn into an image prompt.
        api_key: API key forwarded to the LLM backend.
        paragraph_index: Kept for backward compatibility; no longer used.

    Returns:
        The generated image prompt string.
    """
    llm_settings = config.llm
    llm_kwargs = {
        "base_url": llm_settings.base_url,
        "model": llm_settings.model,
        "temperature": llm_settings.temperature,
        "max_tokens": llm_settings.max_tokens,
        "system_prompt": llm_settings.system_prompt,
    }
    return generate_text(paragraph, api_key, **llm_kwargs)


def process_single_paragraph(paragraph_data: Dict[str, Any]) -> Dict[str, Any]:
    """
    Process one paragraph end-to-end: image prompt -> image -> TTS audio -> video segment.

    Intended to run inside a thread pool: all console output goes through
    thread_safe_print, and every generated file path is appended to the shared
    ``temp_files`` list (list.append is atomic in CPython, so no extra lock is
    needed for that).

    Args:
        paragraph_data: Task dict with keys 'paragraph', 'index', 'output_dir',
            'chapter_hash', 'animation_type', 'temp_files', 'api_key'.

    Returns:
        On success: {"success": True, "index", "segment_path" (file used for
        merging), "segment_directory", "segment_data_path", "error": None}.
        On failure: {"success": False, "index", "error", "segment_path": None}.
    """
    paragraph = paragraph_data['paragraph']
    i = paragraph_data['index']
    output_dir = paragraph_data['output_dir']
    chapter_hash = paragraph_data['chapter_hash']
    animation_type = paragraph_data['animation_type']
    temp_files = paragraph_data['temp_files']
    api_key = paragraph_data['api_key']

    # Each paragraph gets its own subdirectory holding all of its artifacts.
    segment_dir = os.path.join(output_dir, f"segment_{i+1:02d}")
    os.makedirs(segment_dir, exist_ok=True)

    try:
        thread_safe_print(f"🎬 [段落 {i+1}] 开始处理: {paragraph[:50]}...")

        thread_safe_print(f"🎨 [段落 {i+1}] 正在生成图片提示词...")
        try:
            # Generate the image prompt for this paragraph.
            image_prompt = generate_image_prompt(paragraph, api_key, i)
            thread_safe_print(f"✅ [段落 {i+1}] 图片提示词生成成功: {image_prompt[:100]}...")

            # Persist the paragraph text and its image prompt in the segment dir.
            segment_data = {
                "index": i + 1,
                "paragraph": paragraph,
                "image_prompt": image_prompt,
                "timestamp": datetime.now().isoformat()
            }

            # Save the structured JSON record.
            segment_json_path = os.path.join(segment_dir, f"segment_{chapter_hash}_{i}.json")
            with open(segment_json_path, 'w', encoding='utf-8') as f:
                json.dump(segment_data, f, ensure_ascii=False, indent=2)

            # Save the raw paragraph text.
            segment_text_path = os.path.join(segment_dir, f"paragraph_{i+1:02d}.txt")
            with open(segment_text_path, 'w', encoding='utf-8') as f:
                f.write(f"# 段落 {i+1}\n\n")
                f.write(paragraph)

            # Save the image prompt as plain text.
            prompt_text_path = os.path.join(segment_dir, f"prompt_{i+1:02d}.txt")
            with open(prompt_text_path, 'w', encoding='utf-8') as f:
                f.write(f"# 段落 {i+1} - 生图提示词\n\n")
                f.write(image_prompt)

            temp_files.extend([segment_json_path, segment_text_path, prompt_text_path])
            thread_safe_print(f"💾 [段落 {i+1}] 段落数据已保存: {segment_dir}")

            thread_safe_print(f"🖼️ [段落 {i+1}] 正在调用图片生成API...")
            image_url = generate_image_url(
                image_prompt, 
                api_key=api_key,
                model=config.image_gen.model,
                size=config.image_gen.size,
                base_url=config.image_gen.base_url
            )
            thread_safe_print(f"✅ [段落 {i+1}] 图片生成成功")

            # Download the generated image into the segment subdirectory.
            image_filename = os.path.join(segment_dir, f"image_{i+1:02d}.jpg")
            thread_safe_print(f"📥 [段落 {i+1}] 正在下载图片...")
            image_path = download_image(image_url, image_filename)
            temp_files.append(image_path)
            thread_safe_print(f"✅ [段落 {i+1}] 图片下载完成: {image_path}")

            # Re-read the segment record and enrich it with the image info.
            with open(segment_json_path, 'r', encoding='utf-8') as f:
                segment_data = json.load(f)
            segment_data["image_path"] = image_path
            segment_data["image_url"] = image_url
            segment_data["segment_directory"] = segment_dir
            with open(segment_json_path, 'w', encoding='utf-8') as f:
                json.dump(segment_data, f, ensure_ascii=False, indent=2)
            thread_safe_print(f"💾 [段落 {i+1}] 图片信息已保存到段落数据")

        except Exception as e:
            thread_safe_print(f"❌ [段落 {i+1}] 图片生成失败: {e}")
            return {
                "success": False,
                "index": i,
                "error": f"图片生成失败: {str(e)}",
                "segment_path": None
            }

        try:
            thread_safe_print(f"🎵 [段落 {i+1}] 正在生成音频，文本长度: {len(paragraph)} 字符")

            # Synthesize narration audio into the segment subdirectory.
            audio_filename = os.path.join(segment_dir, f"audio_{i+1:02d}.wav")
            thread_safe_print(f"🎤 [段落 {i+1}] 正在调用TTS API...")
            audio_path = generate_tts(
                paragraph, 
                audio_filename,
                api_url=config.tts.api_url,
                voice_type=config.tts.default_voice_type,
                voice_mapping=config.tts.voice_mapping
            )
            temp_files.append(audio_path)
            thread_safe_print(f"✅ [段落 {i+1}] 音频生成成功")

            # Render the merge-ready video directly in the run's root directory.
            video_filename_root = os.path.join(output_dir, f"segment_{chapter_hash}_{i}.mp4")
            thread_safe_print(f"🎞️ [段落 {i+1}] 正在合成视频...")

            # Writing straight to the root avoids an extra copy step later.
            segment_path = create_video_from_audio_image(
                audio_path, image_path, video_filename_root, paragraph, animation_type
            )

            # Also expose the video inside the segment subdirectory, preferring
            # a hard link so no extra disk space is used.
            video_filename_segment = os.path.join(segment_dir, f"video_{i+1:02d}.mp4")
            try:
                os.link(segment_path, video_filename_segment)
            except OSError:
                # Hard links fail across filesystems; try a symlink next.
                try:
                    os.symlink(segment_path, video_filename_segment)
                except OSError:
                    # Symlinks can be unavailable too (e.g. Windows without
                    # the symlink privilege) — fall back to a real copy rather
                    # than failing the whole, otherwise-successful segment.
                    shutil.copy2(segment_path, video_filename_segment)

            segment_path_for_merge = segment_path

            thread_safe_print(f"✅ [段落 {i+1}] 视频段生成成功")

            # Re-read the segment record and enrich it with audio/video paths.
            with open(segment_json_path, 'r', encoding='utf-8') as f:
                segment_data = json.load(f)
            segment_data["audio_path"] = audio_path
            segment_data["video_path"] = video_filename_segment
            segment_data["video_path_for_merge"] = segment_path_for_merge
            segment_data["processing_completed"] = datetime.now().isoformat()
            with open(segment_json_path, 'w', encoding='utf-8') as f:
                json.dump(segment_data, f, ensure_ascii=False, indent=2)
            thread_safe_print(f"💾 [段落 {i+1}] 处理完成数据已保存")

            return {
                "success": True,
                "index": i,
                "segment_path": segment_path_for_merge,  # file path used for merging
                "segment_directory": segment_dir,
                "segment_data_path": segment_json_path,
                "error": None
            }

        except requests.exceptions.RequestException as e:
            thread_safe_print(f"❌ [段落 {i+1}] TTS API请求失败: {e}")
            return {
                "success": False,
                "index": i,
                "error": f"TTS API请求失败: {str(e)}",
                "segment_path": None
            }
        except Exception as e:
            thread_safe_print(f"❌ [段落 {i+1}] 音频或视频生成失败: {e}")
            return {
                "success": False,
                "index": i,
                "error": f"音频或视频生成失败: {str(e)}",
                "segment_path": None
            }

    except Exception as e:
        thread_safe_print(f"❌ [段落 {i+1}] 处理失败: {e}")
        return {
            "success": False,
            "index": i,
            "error": str(e),
            "segment_path": None
        }


def process_chapter(chapter_content: str, api_key: str, base_output_dir: str = "output") -> Dict[str, Any]:
    """
    Convert a chapter of text into a narrated video with images and subtitles.

    Pipeline:
      1. Split the chapter into paragraphs and persist the split.
      2. Process every paragraph concurrently (prompt -> image -> TTS -> segment).
      3. Merge the segments with FFmpeg's concat demuxer (stream copy), falling
         back to MoviePy re-encoding if FFmpeg fails.
      4. Attach subtitles (hardcoded or embedded per config) and write a JSON
         processing report plus a file index.

    Args:
        chapter_content: Raw chapter text.
        api_key: ARK API key used by the LLM / image / TTS backends.
        base_output_dir: Root directory; a timestamped subdirectory is created
            for each run.

    Returns:
        A report dict: on success "success": True with output paths and counts;
        on failure "success": False with an "error" message.
    """
    print(f"ARK_API_KEY: {'已设置' if api_key else '未设置'}")

    if not api_key:
        return {
            "success": False,
            "error": "ARK_API_KEY 不能为空",
            "segments_count": 0,
            "paragraphs_count": 0,
        }

    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    output_dir = os.path.join(base_output_dir, f"video_{timestamp}")
    os.makedirs(output_dir, exist_ok=True)
    print(f"输出目录: {output_dir}")
    print(f"生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")

    # Short content hash used to namespace all per-chapter artifact filenames.
    chapter_hash = hashlib.md5(chapter_content.encode()).hexdigest()[:8]
    paragraphs = split_chapter_into_paragraphs(chapter_content)

    # Persist the original chapter content and the paragraph split.
    chapter_data = {
        "timestamp": timestamp,
        "chapter_hash": chapter_hash,
        "original_content": chapter_content,
        "paragraphs": paragraphs,
        "paragraphs_count": len(paragraphs),
        "output_dir": output_dir
    }
    chapter_json_path = os.path.join(output_dir, f"chapter_{chapter_hash}.json")
    with open(chapter_json_path, 'w', encoding='utf-8') as f:
        json.dump(chapter_data, f, ensure_ascii=False, indent=2)
    print(f"📄 章节数据已保存: {chapter_json_path}")

    if not paragraphs:
        return {
            "success": False,
            "error": "章节内容为空或无法分割成段落",
            "segments_count": 0,
            "paragraphs_count": 0,
        }

    # Save the paragraph texts up front so they survive any later failure.
    paragraphs_file = save_paragraphs_as_text(paragraphs, output_dir, chapter_hash)
    print(f"📝 分段文字已保存: {paragraphs_file}")

    print(f"📊 开始并发处理 {len(paragraphs)} 个段落...")
    # Pre-sized so results can be written by index as futures complete
    # out of order; None slots mark failed paragraphs.
    video_segments = [None] * len(paragraphs)
    image_prompts = [None] * len(paragraphs)
    temp_files = []

    paragraph_tasks = []
    for i, paragraph in enumerate(paragraphs):
        # Cycle through the configured animation effects per paragraph.
        animation_type = config.processing.animation_effects[i % len(config.processing.animation_effects)]
        paragraph_data = {
            'paragraph': paragraph,
            'index': i,
            'output_dir': output_dir,
            'chapter_hash': chapter_hash,
            'animation_type': animation_type,
            'temp_files': temp_files,
            'api_key': api_key
        }
        paragraph_tasks.append(paragraph_data)

    try:
        max_workers = min(config.processing.max_concurrent_workers, len(paragraphs))
        print(f"🚀 启动 {max_workers} 个并发工作线程")

        with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
            future_to_index = {
                executor.submit(process_single_paragraph, task_data): task_data['index']
                for task_data in paragraph_tasks
            }

            completed_count = 0
            failed_count = 0

            for future in concurrent.futures.as_completed(future_to_index):
                index = future_to_index[future]
                try:
                    result = future.result()
                    if result['success']:
                        video_segments[index] = result['segment_path']
                        # Pull the image_prompt back out of the segment's JSON record.
                        if result.get('segment_data_path') and os.path.exists(result['segment_data_path']):
                            with open(result['segment_data_path'], 'r', encoding='utf-8') as f:
                                segment_data = json.load(f)
                                image_prompts[index] = segment_data.get('image_prompt', '')
                        completed_count += 1
                        thread_safe_print(f"🎉 [进度] {completed_count}/{len(paragraphs)} 个段落处理完成")
                    else:
                        failed_count += 1
                        thread_safe_print(f"⚠️ [段落 {index+1}] 处理失败: {result['error']}")

                except Exception as e:
                    failed_count += 1
                    thread_safe_print(f"❌ [段落 {index+1}] 处理异常: {e}")

            # Drop failed slots, preserving paragraph order for the merge.
            video_segments = [seg for seg in video_segments if seg is not None]

            print(f"\n📈 并发处理完成统计:")
            print(f"✅ 成功: {completed_count} 个段落")
            print(f"❌ 失败: {failed_count} 个段落")
            print(f"🎬 可用视频段: {len(video_segments)} 个")

        if not video_segments:
            # Every paragraph failed — route through the error-report path
            # below instead of returning a "successful" run with no output.
            raise RuntimeError("所有段落处理失败，未生成任何可用视频段")

        final_output = None
        if len(video_segments) > 1:
            # Fast merge with FFmpeg's concat demuxer (no re-encoding needed).
            final_output = os.path.join(output_dir, f"chapter_{chapter_hash}_final.mp4")

            # Build the concat list file FFmpeg expects.
            concat_list_file = os.path.join(output_dir, f"concat_list_{chapter_hash}.txt")
            with open(concat_list_file, 'w', encoding='utf-8') as f:
                for segment in video_segments:
                    # FFmpeg needs absolute paths (or escaped relative ones).
                    abs_path = os.path.abspath(segment)
                    f.write(f"file '{abs_path}'\n")

            print(f"🚀 使用FFmpeg快速合并 {len(video_segments)} 个视频段...")

            import subprocess
            try:
                ffmpeg_cmd = [
                    'ffmpeg', '-f', 'concat', '-safe', '0', 
                    '-i', concat_list_file,
                    '-c', 'copy',  # copy streams directly, no re-encoding
                    '-y',  # overwrite output file
                    final_output
                ]

                subprocess.run(ffmpeg_cmd, 
                               capture_output=True, 
                               text=True, 
                               check=True)
                print(f"✅ FFmpeg快速合并完成: {final_output}")

            except subprocess.CalledProcessError as e:
                print(f"❌ FFmpeg合并失败，回退到MoviePy方案: {e}")
                # Fall back to the original MoviePy merge (re-encodes).
                from moviepy import concatenate_videoclips, VideoFileClip
                clips = []
                try:
                    for segment in video_segments:
                        clips.append(VideoFileClip(segment))
                    final_video = concatenate_videoclips(clips, method="compose")
                    final_video.write_videofile(
                        final_output, fps=config.video.default_fps, audio_codec="aac", codec="libx264"
                    )
                finally:
                    try:
                        if 'final_video' in locals():
                            final_video.close()
                    except Exception:
                        pass
                    for clip in clips:
                        try:
                            clip.close()
                        except Exception:
                            pass
            finally:
                # Remove the temporary concat list whether or not FFmpeg
                # succeeded (previously it leaked on the fallback path).
                if os.path.exists(concat_list_file):
                    os.remove(concat_list_file)

            # Generate the SRT subtitle file for the merged video.
            print("正在生成合并视频的SRT字幕文件...")
            # Fast SRT generation that does not depend on MoviePy clips.
            combined_srt_content = generate_combined_srt_fast(paragraphs, video_segments)
            combined_srt_filename = final_output.replace(".mp4", ".srt")
            with open(combined_srt_filename, "w", encoding="utf-8") as f:
                f.write(combined_srt_content)
            print(f"合并SRT字幕文件已保存: {combined_srt_filename}")

            # Persist the collected image prompts (skipping failed paragraphs).
            valid_prompts = [prompt for prompt in image_prompts if prompt]
            if valid_prompts:
                prompts_file = save_image_prompts_as_text(valid_prompts, output_dir, chapter_hash)
                print(f"🎨 生图提示词已保存: {prompts_file}")

            # Subtitle handling: hardcoded (burned-in) subtitles take priority.
            if config.subtitle.hardcode_to_video:
                try:
                    if os.path.exists(combined_srt_filename):
                        burned_output = final_output.replace(".mp4", "_hardsub.mp4")
                        final_output = burn_srt_to_video(final_output, combined_srt_filename, burned_output)
                        print(f"🎬 已生成硬字幕视频: {final_output}")
                    else:
                        print("⚠️ 未找到用于烧录的SRT字幕文件")
                except Exception as e:
                    print(f"⚠️ 硬字幕烧录失败: {e}")
            elif config.subtitle.embed_to_video:
                try:
                    embedded_output = final_output.replace(".mp4", "_subs.mp4")
                    final_output = embed_srt_to_video(final_output, combined_srt_filename, embedded_output)
                    print(f"🎬 已生成内嵌字幕视频: {final_output}")
                except Exception as e:
                    print(f"⚠️ 内嵌字幕失败: {e}")

        elif len(video_segments) == 1:
            final_output = video_segments[0]

            # Locate the SRT belonging to the single segment.
            srt_path = final_output.replace(".mp4", ".srt")
            if not os.path.exists(srt_path):
                srt_path = None
                # Search the per-segment JSON records for a matching SRT.
                for i in range(len(paragraphs)):
                    seg_json = os.path.join(output_dir, f"segment_{chapter_hash}_{i}.json")
                    if os.path.exists(seg_json):
                        with open(seg_json, 'r', encoding='utf-8') as f:
                            seg_data = json.load(f)
                        if seg_data.get("video_path_for_merge") == final_output:
                            seg_video_path = seg_data.get("video_path")
                            if seg_video_path:
                                candidate = seg_video_path.replace(".mp4", ".srt")
                                if os.path.exists(candidate):
                                    srt_path = candidate
                                    break

            # Subtitle handling: hardcoded subtitles take priority.
            if srt_path and config.subtitle.hardcode_to_video:
                try:
                    burned_output = final_output.replace(".mp4", "_hardsub.mp4")
                    final_output = burn_srt_to_video(final_output, srt_path, burned_output)
                    print(f"🎬 已生成硬字幕视频: {final_output}")
                except Exception as e:
                    print(f"⚠️ 硬字幕烧录失败: {e}")
            elif srt_path and config.subtitle.embed_to_video:
                try:
                    embedded_output = final_output.replace(".mp4", "_subs.mp4")
                    final_output = embed_srt_to_video(final_output, srt_path, embedded_output)
                    print(f"🎬 已生成内嵌字幕视频: {final_output}")
                except Exception as e:
                    print(f"⚠️ 内嵌字幕失败: {e}")
            else:
                if not srt_path:
                    print("ℹ️ 未找到可用于字幕处理的SRT文件")

        # Build and persist the final processing report.
        final_report = {
            "success": True,
            "output_file": final_output,
            "output_dir": output_dir,
            "timestamp": timestamp,
            "chapter_hash": chapter_hash,
            "segments_count": len(video_segments),
            "paragraphs_count": len(paragraphs),
            "completed_segments": completed_count,
            "failed_segments": failed_count,
            "processing_completed": datetime.now().isoformat(),
            "files_generated": {
                "chapter_data": chapter_json_path,
                "final_video": final_output,
                "srt_subtitle": combined_srt_filename if len(video_segments) > 1 else None,
                "segment_data_files": [os.path.join(output_dir, f"segment_{chapter_hash}_{i}.json") for i in range(len(paragraphs))],
                "individual_segments": video_segments,
                "embedded_final_video": final_output if (not config.subtitle.hardcode_to_video and config.subtitle.embed_to_video) else None,
                "hardcoded_final_video": final_output if config.subtitle.hardcode_to_video else None
            }
        }

        report_path = os.path.join(output_dir, f"processing_report_{chapter_hash}.json")
        with open(report_path, 'w', encoding='utf-8') as f:
            json.dump(final_report, f, ensure_ascii=False, indent=2)
        print(f"📊 处理报告已保存: {report_path}")

        # Create the human-browsable file index for this run.
        index_path = create_file_index(output_dir, chapter_hash, final_report)
        print(f"📋 文件索引已创建: {index_path}")

        return final_report

    except Exception as e:
        error_report = {
            "success": False,
            "error": str(e),
            "output_dir": output_dir,
            "timestamp": timestamp,
            "chapter_hash": chapter_hash,
            "segments_count": len(video_segments),
            "paragraphs_count": len(paragraphs),
            "processing_failed": datetime.now().isoformat()
        }

        error_report_path = os.path.join(output_dir, f"error_report_{chapter_hash}.json")
        with open(error_report_path, 'w', encoding='utf-8') as f:
            json.dump(error_report, f, ensure_ascii=False, indent=2)
        print(f"❌ 错误报告已保存: {error_report_path}")

        return error_report

    finally:
        cleanup_temp_files(temp_files)