"""
图像平滑过渡模块
实现类似 DiffMorpher 的图像到图像平滑过渡效果
支持多种过渡方法：光流法、图像插值、深度学习模型等
"""

from pathlib import Path
from typing import List, Optional, Tuple, Union
import numpy as np
import cv2
from moviepy.editor import ImageClip, VideoClip, concatenate_videoclips
from tqdm import trange


def calculate_optical_flow(img1: np.ndarray, img2: np.ndarray, 
                          method: str = "farneback") -> np.ndarray:
    """
    Compute dense optical flow between two images (tuned version).

    Args:
        img1: first image (BGR, or already grayscale)
        img2: second image (BGR, or already grayscale)
        method: flow method, "farneback" or "lucas_kanade".
            NOTE: "lucas_kanade" is currently an alias for the same dense
            Farneback computation (true sparse LK would need feature points).

    Returns:
        flow: flow vector field of shape (H, W, 2), float32

    Raises:
        ValueError: if `method` is not one of the accepted names.
    """
    # Validate up front so both accepted names share one code path below.
    if method not in ("farneback", "lucas_kanade"):
        raise ValueError(f"Unknown optical flow method: {method}")

    # Convert to grayscale when a color image is supplied.
    gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY) if len(img1.shape) == 3 else img1
    gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY) if len(img2.shape) == 3 else img2

    # Preprocessing: histogram equalization boosts contrast/features,
    # which helps Farneback lock onto structure.
    gray1 = cv2.equalizeHist(gray1)
    gray2 = cv2.equalizeHist(gray2)

    # Light Gaussian blur to suppress noise introduced by equalization.
    gray1 = cv2.GaussianBlur(gray1, (5, 5), 0.5)
    gray2 = cv2.GaussianBlur(gray2, (5, 5), 0.5)

    # Both method names previously ran this identical call — deduplicated.
    flow = cv2.calcOpticalFlowFarneback(
        gray1, gray2,
        None,
        pyr_scale=0.5,      # pyramid downscale ratio
        levels=5,           # more pyramid levels -> handles larger motion
        winsize=25,         # larger averaging window -> smoother, larger motion
        iterations=5,       # more iterations per level -> higher accuracy
        poly_n=7,           # polynomial expansion neighborhood size
        poly_sigma=1.5,     # Gaussian sigma for the polynomial expansion
        flags=cv2.OPTFLOW_FARNEBACK_GAUSSIAN  # Gaussian (not box) weighting
    )

    # Smooth the flow field itself to reduce per-pixel noise.
    flow = cv2.GaussianBlur(flow, (5, 5), 1.0)

    return flow


def warp_image_by_flow(img: np.ndarray, flow: np.ndarray, alpha: float) -> np.ndarray:
    """
    Warp an image along a flow field scaled by an interpolation factor.

    Args:
        img: input image (H, W[, C])
        flow: flow vector field (H, W, 2); channel 0 is x-displacement,
            channel 1 is y-displacement
        alpha: interpolation factor (0.0 = first image, 1.0 = second image)

    Returns:
        warped: the warped image, same shape/dtype as `img`

    NOTE(review): sampling at (x + alpha*flow) means each output pixel reads
    the source at a flow-displaced position; callers pass negated flow for
    the reverse direction — confirm the sign convention matches Farneback's
    prev->next definition before changing it.
    """
    height, width = img.shape[:2]

    # Integer pixel grid; np.indices yields (rows, cols) = (y, x).
    grid_y, grid_x = np.indices((height, width))

    # Displace the sampling coordinates by the scaled flow; remap requires
    # float32 maps.
    map_x = (grid_x + flow[:, :, 0] * alpha).astype(np.float32)
    map_y = (grid_y + flow[:, :, 1] * alpha).astype(np.float32)

    # Bilinear resampling; replicate the border so edges do not go black.
    return cv2.remap(img, map_x, map_y, cv2.INTER_LINEAR,
                     borderMode=cv2.BORDER_REPLICATE)


def _apply_blend_curve(alpha: float, blend_mode: str) -> float:
    """
    Map linear progress alpha in [0, 1] onto an easing curve.

    Args:
        alpha: linear progress (0.0 = first image, 1.0 = second image)
        blend_mode: "linear" (identity), "ease_in_out" (smoothstep),
            or "sigmoid" (steep logistic centered at 0.5)

    Returns:
        The eased progress value. Any other mode falls through unchanged,
        matching the original inline behavior.
    """
    if blend_mode == "ease_in_out":
        # smoothstep: zero slope at both endpoints for a gentle start/stop
        return alpha * alpha * (3 - 2 * alpha)
    if blend_mode == "sigmoid":
        # NOTE: endpoints are not exactly 0/1 (sigmoid(±5) ≈ 0.007/0.993)
        return 1 / (1 + np.exp(-10 * (alpha - 0.5)))
    return alpha


def generate_morph_frames(img1: np.ndarray, img2: np.ndarray, 
                          num_frames: int = 10,
                          method: str = "optical_flow",
                          blend_mode: str = "linear") -> List[np.ndarray]:
    """
    Generate intermediate morph frames between two images (tuned version).

    Args:
        img1: first image (BGR)
        img2: second image (BGR)
        num_frames: number of transition steps; the returned list has
            num_frames + 1 frames (both endpoints included)
        method: "optical_flow", "crossfade", or "hybrid"
        blend_mode: "linear", "ease_in_out", or "sigmoid"

    Returns:
        frames: list of frames, including the start and end frames

    Raises:
        ValueError: if num_frames < 1 or method is unknown.
    """
    # Guard: num_frames == 0 previously caused a ZeroDivisionError below.
    if num_frames < 1:
        raise ValueError(f"num_frames must be >= 1, got {num_frames}")

    # Force matching sizes (high-quality resampling for the still image).
    if img1.shape != img2.shape:
        h2, w2 = img2.shape[:2]
        img1 = cv2.resize(img1, (w2, h2), interpolation=cv2.INTER_LANCZOS4)

    frames = []

    if method == "crossfade":
        # Plain cross-dissolve with optional easing.
        for i in range(num_frames + 1):
            alpha = _apply_blend_curve(i / num_frames, blend_mode)
            blended = cv2.addWeighted(img1, 1 - alpha, img2, alpha, 0)
            frames.append(blended)

    elif method == "optical_flow":
        # Flow-based morph: warp both endpoints toward each other and blend.
        print(f"    计算光流...")
        flow = calculate_optical_flow(img1, img2)

        # Flow-magnitude statistics drive the adaptive blending below.
        flow_magnitude = np.sqrt(flow[:, :, 0]**2 + flow[:, :, 1]**2)
        avg_flow = np.mean(flow_magnitude)
        max_flow = np.max(flow_magnitude)

        print(f"    光流统计: 平均={avg_flow:.2f}, 最大={max_flow:.2f}")

        for i in range(num_frames + 1):
            alpha = _apply_blend_curve(i / num_frames, blend_mode)

            # Warp img1 forward along the flow.
            warped1 = warp_image_by_flow(img1, flow, alpha)

            # Warp img2 backward. NOTE(review): the (1 - alpha) factor is
            # applied both to the flow and as the warp alpha, so the net
            # displacement scales with (1 - alpha)^2 — confirm intended.
            reverse_flow = -flow * (1 - alpha)
            warped2 = warp_image_by_flow(img2, reverse_flow, 1 - alpha)

            if avg_flow < 5.0:
                # Small motion: mix in a direct cross-dissolve so warping
                # artifacts don't dominate a nearly-static transition.
                direct_blend = cv2.addWeighted(img1, 1 - alpha, img2, alpha, 0)
                warped_blend = cv2.addWeighted(warped1, 1 - alpha, warped2, alpha, 0)
                blended = cv2.addWeighted(warped_blend, 0.5, direct_blend, 0.5, 0)
            else:
                # Large motion: rely mainly on the warped blend.
                blended = cv2.addWeighted(warped1, 1 - alpha, warped2, alpha, 0)

            # (Removed a convertScaleAbs(alpha=1.0, beta=0) call here: it was
            # a no-op on uint8 frames despite being labeled "sharpening".)
            frames.append(blended)

    elif method == "hybrid":
        # Hybrid: flow-based morph blended with a plain cross-dissolve,
        # weighted by where we are in the transition.
        print(f"    计算光流（混合模式）...")
        flow = calculate_optical_flow(img1, img2)

        for i in range(num_frames + 1):
            alpha = _apply_blend_curve(i / num_frames, blend_mode)

            # Flow-based warps of both endpoints (see NOTE above about the
            # double (1 - alpha) scaling on the reverse warp).
            warped1 = warp_image_by_flow(img1, flow, alpha)
            reverse_flow = -flow * (1 - alpha)
            warped2 = warp_image_by_flow(img2, reverse_flow, 1 - alpha)

            warped_blend = cv2.addWeighted(warped1, 1 - alpha, warped2, alpha, 0)

            # Plain cross-dissolve of the original endpoints.
            direct_blend = cv2.addWeighted(img1, 1 - alpha, img2, alpha, 0)

            # Mid-transition favors the flow warp; the ends favor the
            # direct dissolve so the sequence starts/stops cleanly.
            if 0.2 < alpha < 0.8:
                final = cv2.addWeighted(warped_blend, 0.8, direct_blend, 0.2, 0)
            else:
                final = cv2.addWeighted(warped_blend, 0.4, direct_blend, 0.6, 0)

            frames.append(final)

    else:
        raise ValueError(f"Unknown morph method: {method}")

    return frames


def create_smooth_transition_clip(img1_path: Union[str, Path],
                                 img2_path: Union[str, Path],
                                 duration: float,
                                 fps: int = 24,
                                 transition_method: str = "optical_flow",
                                 blend_mode: str = "ease_in_out") -> VideoClip:
    """
    Create a smooth transition video clip between two image files.

    Args:
        img1_path: path of the outgoing image
        img2_path: path of the incoming image
        duration: transition length in seconds
        fps: output frame rate
        transition_method: one of the `generate_morph_frames` methods, or
            "enhanced_optical_flow" to delegate to the advanced module
        blend_mode: easing mode passed through to `generate_morph_frames`

    Returns:
        A MoviePy VideoClip containing the transition.

    Raises:
        FileNotFoundError: if either image cannot be read.
    """
    # Delegate to the advanced morphing module when requested; fall back
    # to the standard path if that module is unavailable.
    if transition_method == "enhanced_optical_flow":
        try:
            from mm_story_agent.advanced_image_transition import create_advanced_transition_clip
            print(f"    使用高级变形方法: {transition_method}")
            return create_advanced_transition_clip(
                img1_path, img2_path, duration, fps, transition_method, blend_mode
            )
        except ImportError as e:
            print(f"    ⚠️  高级变形模块不可用: {e}，回退到标准方法")
            transition_method = "optical_flow"

    # Standard path: load both images.
    img1 = cv2.imread(str(img1_path))
    img2 = cv2.imread(str(img2_path))

    if img1 is None:
        raise FileNotFoundError(f"无法读取图像: {img1_path}")
    if img2 is None:
        raise FileNotFoundError(f"无法读取图像: {img2_path}")

    # Normalize both images to the larger of the two sizes.
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    target_h = max(h1, h2)
    target_w = max(w1, w2)

    if img1.shape[:2] != (target_h, target_w):
        img1 = cv2.resize(img1, (target_w, target_h), interpolation=cv2.INTER_LINEAR)
    # BUGFIX: shape[:2] is (h, w); this previously compared against
    # (target_w, target_h), so a non-square img2 could skip the resize
    # and break the morphing step with mismatched shapes.
    if img2.shape[:2] != (target_h, target_w):
        img2 = cv2.resize(img2, (target_w, target_h), interpolation=cv2.INTER_LINEAR)

    # Frame budget: at least 15 frames so short transitions stay smooth.
    num_frames = int(duration * fps)
    num_frames = max(15, num_frames)
    if duration < 0.5:
        num_frames = max(num_frames, int(0.5 * fps))

    # Generate the intermediate frames (endpoints included).
    frames = generate_morph_frames(
        img1, img2, 
        num_frames=num_frames,
        method=transition_method,
        blend_mode=blend_mode
    )

    # Convert every frame to RGB for MoviePy (OpenCV works in BGR).
    frames_rgb = []
    for f in frames:
        if len(f.shape) == 3:
            if f.shape[2] == 3:
                # 3 channels: assume BGR, convert to RGB
                frames_rgb.append(cv2.cvtColor(f, cv2.COLOR_BGR2RGB))
            elif f.shape[2] == 4:
                # 4 channels: assume BGRA, drop alpha and convert to RGB
                frames_rgb.append(cv2.cvtColor(f, cv2.COLOR_BGRA2RGB))
            else:
                frames_rgb.append(f)
        else:
            frames_rgb.append(f)

    # One short ImageClip per frame, concatenated into the transition.
    clips = []
    frame_duration = duration / len(frames_rgb)

    for frame in frames_rgb:
        clip = ImageClip(frame).set_duration(frame_duration).set_fps(fps)
        clips.append(clip)

    transition_clip = concatenate_videoclips(clips, method="compose")

    return transition_clip


def create_morphed_sequence(image_paths: List[Union[str, Path]],
                           durations: List[float],
                           fps: int = 24,
                           transition_duration: float = 0.5,
                           transition_method: str = "optical_flow",
                           blend_mode: str = "ease_in_out") -> VideoClip:
    """
    Create a smoothly-transitioned sequence from multiple images.

    Args:
        image_paths: list of image file paths
        durations: per-image display duration in seconds (must supply at
            least one entry per image)
        fps: frame rate
        transition_duration: transition length in seconds
        transition_method: transition method (see create_smooth_transition_clip)
        blend_mode: easing mode

    Returns:
        final_clip: the assembled video sequence

    Raises:
        ValueError: if image_paths is empty or durations is too short.
        FileNotFoundError: if the single supplied image cannot be read.
    """
    if len(image_paths) == 0:
        raise ValueError("图像路径列表不能为空")

    # Fail early with a clear message instead of an IndexError mid-build.
    if len(durations) < len(image_paths):
        raise ValueError(
            f"durations has {len(durations)} entries for {len(image_paths)} images"
        )

    if len(image_paths) == 1:
        # Single image: no transitions needed.
        img = cv2.imread(str(image_paths[0]))
        # BUGFIX: previously an unreadable file crashed inside cvtColor
        # with a cryptic OpenCV error; raise the same exception type as
        # create_smooth_transition_clip instead.
        if img is None:
            raise FileNotFoundError(f"无法读取图像: {image_paths[0]}")
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        return ImageClip(img_rgb).set_duration(durations[0]).set_fps(fps)

    clips = []

    for i in range(len(image_paths)):
        # Load the still image for this slot; skip unreadable files.
        img = cv2.imread(str(image_paths[i]))
        if img is None:
            print(f"⚠️  警告：无法读取图像 {image_paths[i]}，跳过")
            continue

        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        image_clip = ImageClip(img_rgb).set_duration(durations[i]).set_fps(fps)

        # Insert a transition from the previous image (if any).
        # NOTE(review): if image i-1 was skipped above, this still references
        # it; create_smooth_transition_clip will raise and we fall back.
        if i > 0:
            try:
                transition_clip = create_smooth_transition_clip(
                    image_paths[i-1],
                    image_paths[i],
                    duration=transition_duration,
                    fps=fps,
                    transition_method=transition_method,
                    blend_mode=blend_mode
                )
                clips.append(transition_clip)
            except Exception as e:
                print(f"⚠️  创建过渡失败: {e}，使用简单交叉淡化")
                # Fallback: a crossfaded still of the incoming image.
                # NOTE(review): with concatenate(method="compose") there is no
                # overlap, so crossfadein effectively fades in from black.
                transition_clip = ImageClip(img_rgb).set_duration(transition_duration).set_fps(fps)
                transition_clip = transition_clip.crossfadein(transition_duration)
                clips.append(transition_clip)

        clips.append(image_clip)

    # Join everything into one clip.
    final_clip = concatenate_videoclips(clips, method="compose")

    return final_clip

