import numpy as np
import cv2
from tqdm import tqdm

def detect_rods(frame_gray):
    """Produce a binary mask highlighting vertical rod edges in a grayscale frame.

    Pipeline: vertical-edge response (Sobel in x) -> local contrast boost
    (CLAHE) -> adaptive threshold -> morphology tuned for tall, thin shapes.

    Args:
        frame_gray: Single-channel (grayscale) image.

    Returns:
        Binary (0/255) uint8 mask of the detected vertical structures.
    """
    # Emphasize vertical edges via the horizontal-gradient Sobel filter.
    edges = cv2.convertScaleAbs(cv2.Sobel(frame_gray, cv2.CV_64F, 1, 0, ksize=3))

    # Boost local contrast so faint rod edges survive thresholding.
    edges = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8)).apply(edges)

    # Adaptive threshold copes with uneven illumination across the frame.
    mask = cv2.adaptiveThreshold(
        edges, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2
    )

    # Close small vertical gaps, then open away thin horizontal noise.
    vertical_kernel = np.ones((20, 1), np.uint8)
    horizontal_kernel = np.ones((1, 3), np.uint8)
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, vertical_kernel)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, horizontal_kernel)

    return mask

def stabilize_video(input_path, output_path):
    """Reduce horizontal jitter in a video by aligning rod edges across frames.

    Two-pass approach:
      1. Read every frame, detect rod edges, and estimate each frame's
         horizontal offset relative to the first (reference) frame via
         template matching on the edge masks.
      2. Smooth the per-frame offsets with a Gaussian-weighted moving
         average, then warp, crop, and resize each frame before writing.

    NOTE(review): all frames are buffered in memory, so very long or
    high-resolution videos may exhaust RAM.

    Args:
        input_path: Path to the source video file.
        output_path: Path for the stabilized output video (mp4v codec).
    """
    # 读取视频
    cap = cv2.VideoCapture(input_path)
    if not cap.isOpened():
        print("无法打开视频文件")
        return

    out = None
    try:
        # 获取视频参数
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        # Keep fractional frame rates (e.g. 29.97); fall back if unreported.
        fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

        # 创建视频写入器
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

        # 读取第一帧作为参考
        ret, reference_frame = cap.read()
        if not ret:
            print("无法读取视频帧")
            return

        reference_gray = cv2.cvtColor(reference_frame, cv2.COLOR_BGR2GRAY)
        reference_rods = detect_rods(reference_gray)

        # BUGFIX: matching a template the same size as the search image yields
        # a 1x1 result whose max_loc is always (0, 0), making dx always 0 and
        # the stabilization a no-op.  Use a horizontally-cropped slice of the
        # reference mask so the match location encodes the displacement.
        margin = max(1, min(50, width // 4))
        template = reference_rods[:, margin:width - margin]

        # The reference frame is part of the output too (the original code
        # dropped it), with zero correction by definition.
        frames = [reference_frame]
        dx_values = [0.0]

        print("第一遍：读取视频帧并计算变换...")
        with tqdm(total=max(total_frames - 1, 0)) as pbar:
            while True:
                ret, curr_frame = cap.read()
                if not ret:
                    break

                frames.append(curr_frame)

                # 检测当前帧的棒束
                curr_gray = cv2.cvtColor(curr_frame, cv2.COLOR_BGR2GRAY)
                curr_rods = detect_rods(curr_gray)

                # 使用模板匹配找到位移 (x offset of best template alignment)
                result = cv2.matchTemplate(curr_rods, template, cv2.TM_CCOEFF_NORMED)
                _, _, _, max_loc = cv2.minMaxLoc(result)
                dx_values.append(float(max_loc[0] - margin))
                pbar.update(1)

        print("第二遍：平滑变换并应用...")
        # Gaussian-weighted moving average over the x offsets; only the
        # x-translation component varies, so smooth the scalar directly.
        smoothing_window = 31
        half = smoothing_window // 2
        smoothed_dx = []
        for i in range(len(dx_values)):
            # Symmetric window (the original dropped the right edge sample).
            window = dx_values[max(0, i - half):min(len(dx_values), i + half + 1)]
            center = len(window) // 2
            # Clamp sigma to avoid ZeroDivisionError on windows shorter than 4.
            sigma = max(len(window) // 4, 1)
            weights = np.exp(-0.1 * ((np.arange(len(window)) - center) / sigma) ** 2)
            smoothed_dx.append(float(np.average(window, weights=weights)))

        # 应用平滑后的变换并写入视频
        border = 50  # crop margin hiding warp edge artifacts
        with tqdm(total=len(frames)) as pbar:
            for frame, dx in zip(frames, smoothed_dx):
                transform = np.array([[1.0, 0.0, -dx],
                                      [0.0, 1.0, 0.0]], dtype=np.float32)
                stabilized = cv2.warpAffine(
                    frame, transform, (width, height),
                    flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)

                # 裁剪边缘 then resize back to the original dimensions.
                stabilized = stabilized[:, border:width - border]
                stabilized = cv2.resize(stabilized, (width, height))

                out.write(stabilized)
                pbar.update(1)

        print("视频稳定处理完成！")
    finally:
        # Release resources on every path (the original leaked them on the
        # early "cannot read frame" return).
        cap.release()
        if out is not None:
            out.release()

if __name__ == "__main__":
    input_video = r"F:\test\output_video_stabilized.mp4"  # 第一次处理后的视频
    output_video = r"F:\test\output_video_stabilized_final.mp4"  # 二次防抖后的视频
    stabilize_video(input_video, output_video) 