"""
高级图像变形模块 - 支持增强光流法
"""
import cv2
import numpy as np
from typing import List, Tuple, Union, Optional
from pathlib import Path
from moviepy.editor import ImageClip, VideoClip, concatenate_videoclips


def calculate_dense_optical_flow_enhanced(img1: np.ndarray, img2: np.ndarray) -> np.ndarray:
    """
    Enhanced dense optical flow computation combining multiple methods.

    Args:
        img1: First image (BGR or single-channel grayscale).
        img2: Second image, same spatial size as img1.

    Returns:
        Dense flow field of shape (H, W, 2) mapping img1 toward img2.
    """
    gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY) if len(img1.shape) == 3 else img1
    gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY) if len(img2.shape) == 3 else img2

    # Pre-processing: equalize histograms so flow estimation sees stronger gradients.
    gray1 = cv2.equalizeHist(gray1)
    gray2 = cv2.equalizeHist(gray2)

    # Farneback method (tuned parameters).
    flow_farneback = cv2.calcOpticalFlowFarneback(
        gray1, gray2, None,
        pyr_scale=0.5,
        levels=5,
        winsize=25,
        iterations=5,
        poly_n=7,
        poly_sigma=1.5,
        flags=cv2.OPTFLOW_FARNEBACK_GAUSSIAN
    )

    # DualTVL1 method (more accurate but slower). The factory lives at
    # cv2.createOptFlow_DualTVL1 in OpenCV 3 but moved to the optflow
    # contrib module in OpenCV 4 — probe both before falling back.
    try:
        if hasattr(cv2, "createOptFlow_DualTVL1"):
            dual_tvl1 = cv2.createOptFlow_DualTVL1()
        else:
            dual_tvl1 = cv2.optflow.createOptFlow_DualTVL1()
        flow_tvl1 = dual_tvl1.calc(gray1, gray2, None)

        # Blend the two flow estimates.
        flow = 0.7 * flow_farneback + 0.3 * flow_tvl1
    except (AttributeError, cv2.error):
        # DualTVL1 unavailable in this OpenCV build — use Farneback only.
        # (Narrowed from a bare `except:` which also hid real bugs.)
        flow = flow_farneback

    # Smooth the flow field to suppress local noise.
    flow = cv2.GaussianBlur(flow, (7, 7), 1.5)

    return flow


def warp_image_advanced(img: np.ndarray, flow: np.ndarray, alpha: float) -> np.ndarray:
    """
    Warp an image along a dense flow field using high-quality interpolation.

    Args:
        img: Source image.
        flow: Dense flow field of shape (H, W, 2).
        alpha: Fraction of the flow displacement to apply.

    Returns:
        Warped image with the same size as `img`.
    """
    height, width = img.shape[:2]
    grid_x, grid_y = np.meshgrid(np.arange(width), np.arange(height))

    # Sampling coordinates: pixel grid shifted by the scaled flow.
    map_x = (grid_x + flow[..., 0] * alpha).astype(np.float32)
    map_y = (grid_y + flow[..., 1] * alpha).astype(np.float32)

    # LANCZOS4 resampling gives higher quality than the default bilinear;
    # replicate the border so edge pixels stay valid.
    return cv2.remap(
        img, map_x, map_y,
        cv2.INTER_LANCZOS4,
        borderMode=cv2.BORDER_REPLICATE
    )


def generate_morph_frames_enhanced(
    img1: np.ndarray, 
    img2: np.ndarray, 
    num_frames: int = 20,
    method: str = "enhanced_optical_flow",
    blend_mode: str = "ease_in_out"
) -> List[np.ndarray]:
    """
    Enhanced intermediate-frame generation for smooth, visible transitions.

    Args:
        img1: First image (BGR format).
        img2: Second image (BGR format).
        num_frames: Number of transition steps; num_frames + 1 frames are
            returned (both endpoints included). Clamped to at least 1.
        method: Transition method; any value other than
            "enhanced_optical_flow" falls back to it.
        blend_mode: Easing curve — "ease_in_out", "sigmoid", or linear
            for any other value.

    Returns:
        List of num_frames + 1 BGR frames morphing img1 into img2.
    """
    # Only one method is implemented; fall back to it instead of recursing.
    if method != "enhanced_optical_flow":
        method = "enhanced_optical_flow"

    # Guard against division by zero in the alpha computation below.
    num_frames = max(1, num_frames)

    # Ensure both images share the same spatial size.
    if img1.shape != img2.shape:
        h2, w2 = img2.shape[:2]
        img1 = cv2.resize(img1, (w2, h2), interpolation=cv2.INTER_LANCZOS4)

    frames = []

    print(f"    使用增强光流法生成 {num_frames} 帧...")
    flow = calculate_dense_optical_flow_enhanced(img1, img2)

    # Flow-magnitude statistics drive the adaptive blending below.
    flow_magnitude = np.sqrt(flow[:, :, 0]**2 + flow[:, :, 1]**2)
    avg_flow = np.mean(flow_magnitude)
    max_flow = np.max(flow_magnitude)
    print(f"    光流统计: 平均={avg_flow:.2f}, 最大={max_flow:.2f}")

    # CLAHE is loop-invariant — create it once, not per frame.
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))

    for i in range(num_frames + 1):
        alpha = i / num_frames

        # Apply the easing curve.
        if blend_mode == "ease_in_out":
            alpha = alpha * alpha * (3 - 2 * alpha)
        elif blend_mode == "sigmoid":
            alpha = 1 / (1 + np.exp(-10 * (alpha - 0.5)))

        # Bidirectional warping. img2 is warped backward by (1 - alpha) of
        # the flow; pass the raw negated flow because warp_image_advanced
        # scales by its alpha argument itself. (The original pre-scaled the
        # reverse flow by (1 - alpha) AND passed 1 - alpha, producing a
        # quadratic (1 - alpha)**2 displacement instead of a linear one.)
        warped1 = warp_image_advanced(img1, flow, alpha)
        warped2 = warp_image_advanced(img2, -flow, 1 - alpha)

        # Adaptive blending based on overall motion magnitude.
        if avg_flow < 3.0:
            # Flow is tiny — lean on a direct cross-fade to avoid warp noise.
            direct_blend = cv2.addWeighted(img1, 1 - alpha, img2, alpha, 0)
            warped_blend = cv2.addWeighted(warped1, 1 - alpha, warped2, alpha, 0)
            blended = cv2.addWeighted(warped_blend, 0.4, direct_blend, 0.6, 0)
        else:
            # Significant motion — rely mainly on the warped images.
            blended = cv2.addWeighted(warped1, 1 - alpha, warped2, alpha, 0)

        # Post-processing: CLAHE on the L channel boosts contrast/sharpness.
        lab = cv2.cvtColor(blended, cv2.COLOR_BGR2LAB)
        l, a, b = cv2.split(lab)
        l = clahe.apply(l)
        blended = cv2.cvtColor(cv2.merge([l, a, b]), cv2.COLOR_LAB2BGR)

        frames.append(blended)

    return frames


def create_advanced_transition_clip(
    img1_path: Union[str, Path],
    img2_path: Union[str, Path],
    duration: float,
    fps: int = 24,
    transition_method: str = "enhanced_optical_flow",
    blend_mode: str = "ease_in_out"
) -> VideoClip:
    """
    Create an advanced smooth transition video clip between two images.

    Args:
        img1_path: Path of the first image.
        img2_path: Path of the second image.
        duration: Transition duration in seconds.
        fps: Frame rate.
        transition_method: Transition method ("enhanced_optical_flow").
        blend_mode: Easing curve forwarded to the frame generator.

    Returns:
        clip: Transition video clip.

    Raises:
        FileNotFoundError: If either image cannot be read.
    """
    # Load images (cv2.imread returns None on failure instead of raising).
    img1 = cv2.imread(str(img1_path))
    img2 = cv2.imread(str(img2_path))

    if img1 is None or img2 is None:
        raise FileNotFoundError(f"无法读取图像: {img1_path} 或 {img2_path}")

    # Unify sizes: scale both up to the larger extent in each dimension.
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    target_h = max(h1, h2)
    target_w = max(w1, w2)

    if img1.shape[:2] != (target_h, target_w):
        img1 = cv2.resize(img1, (target_w, target_h), interpolation=cv2.INTER_LANCZOS4)
    # BUG FIX: this comparison used (target_w, target_h), but shape[:2] is
    # (h, w) — for non-square images the resize decision was inverted.
    if img2.shape[:2] != (target_h, target_w):
        img2 = cv2.resize(img2, (target_w, target_h), interpolation=cv2.INTER_LANCZOS4)

    # More frames make the transition visibly smoother.
    num_frames = int(duration * fps)
    num_frames = max(20, num_frames)  # at least 20 frames
    if duration < 0.6:
        num_frames = max(num_frames, int(0.6 * fps))

    print(f"    生成 {num_frames} 个中间帧（时长: {duration:.2f}秒）...")

    # Generate the intermediate frames.
    frames = generate_morph_frames_enhanced(
        img1, img2,
        num_frames=num_frames,
        method=transition_method,
        blend_mode=blend_mode
    )

    print(f"    ✅ 成功生成 {len(frames)} 帧")

    # Convert BGR(A) frames to RGB, which moviepy expects.
    frames_rgb = []
    for f in frames:
        if len(f.shape) == 3 and f.shape[2] == 3:
            frames_rgb.append(cv2.cvtColor(f, cv2.COLOR_BGR2RGB))
        elif len(f.shape) == 3 and f.shape[2] == 4:
            frames_rgb.append(cv2.cvtColor(f, cv2.COLOR_BGRA2RGB))
        else:
            # Grayscale or unexpected channel count — pass through unchanged.
            frames_rgb.append(f)

    # Build one short ImageClip per frame, then concatenate them.
    clips = []
    frame_duration = duration / len(frames_rgb)

    for frame in frames_rgb:
        clip = ImageClip(frame).set_duration(frame_duration).set_fps(fps)
        clips.append(clip)

    transition_clip = concatenate_videoclips(clips, method="compose")

    return transition_clip

