"""
PIV防抖算法 - 用于处理棒束通道中窗口抖动的图像稳定算法

主要功能：
1. 对PIV实验中拍摄的图像序列进行防抖处理
2. 通过边缘检测和裁剪实现图像稳定

代码架构：
1. ImageCropper类：核心图像裁剪处理类
   - find_right_edge(): 检测图像右边缘位置
   - initialize_with_reference(): 使用参考帧初始化裁剪参数
   - crop_image(): 执行图像裁剪操作

2. 辅助函数：
   - detect_vertical_lines(): 检测图像中的垂直线条
   - process_single_image(): 处理单张图片的函数
   - process_and_save_images(): 批量处理图片并保存结果
   - create_video_from_images(): 从处理后的图片生成视频

算法流程：
1. 读取实验图像序列
2. 对每帧图像进行边缘检测
3. 通过垂直投影和梯度分析定位右边缘
4. 裁切边缘线右侧的图像
5. 多进程并行处理加速运算
6. 最终生成防抖后的视频

使用方法：
1. 设置输入图像文件夹路径（包含连续的BMP图像）
2. 设置输出视频保存路径
3. 运行程序即可自动处理并生成防抖视频

注意事项：
- 输入图像需为BMP格式
- 图像序列中需要有明显的垂直边缘特征
- 默认采样4000帧进行处理，可根据需要调整
"""

import cv2
import os
import numpy as np
from tqdm import tqdm
import multiprocessing
from multiprocessing import Pool
from functools import partial

class ImageCropper:
    """Stabilising cropper: locates a vertical reference edge in each frame
    and crops everything right of it, so a jittering window edge ends up at
    a fixed output column across the whole sequence.
    """

    def __init__(self):
        self.crop_width = None            # reserved: explicit crop width (currently unused)
        self.reference_right_edge = None  # right-edge column found in the reference frame
        self.target_width = None          # output width every cropped frame is forced to
        self.target_height = None         # output height every cropped frame is forced to

    def find_right_edge(self, binary_img):
        """Return the column index of the right-most significant vertical edge
        in *binary_img* (a 2-D binary mask), or None when no gradient clears
        the noise threshold.
        """
        # Vertical projection: per-column sum of the binary mask.
        projection = np.sum(binary_img, axis=0)

        # Smooth the projection with a symmetric 1-D Gaussian to reduce noise.
        window = 15
        sigma = window / 3.0
        # BUGFIX: `-window//2` evaluates to (-15)//2 == -8, which made the
        # kernel asymmetric; -(window // 2) gives the intended -7..7 support.
        x = np.linspace(-(window // 2), window // 2, window)
        gaussian = np.exp(-(x ** 2) / (2 * sigma ** 2))
        gaussian = gaussian / np.sum(gaussian)
        smoothed = np.convolve(projection, gaussian, mode='valid')
        # 'valid' convolution shortens the signal: smoothed[j] is centred on
        # projection[j + centre_offset].  Remember the shift so the returned
        # index refers to original image columns.
        centre_offset = (window - 1) // 2

        gradient = np.gradient(smoothed)

        # Scan right-to-left for the first gradient exceeding the noise floor.
        threshold = np.std(gradient) * 2
        for i in range(len(gradient) - 1, window, -1):
            if abs(gradient[i]) > threshold:
                # Refine the hit with the local |gradient| maximum.
                start = max(0, i - 5)
                local_region = gradient[start:min(len(gradient), i + 6)]
                local_offset = int(np.argmax(np.abs(local_region)))
                # BUGFIX: the original used `i - 5` as the base even when the
                # slice had been clamped to 0, and never compensated for the
                # 'valid' convolution shift.
                return start + local_offset + centre_offset
        return None

    def initialize_with_reference(self, reference_img):
        """Derive the target crop geometry from a reference (BGR) frame.

        Leaves the cropper uninitialised (all targets None) if no edge is
        detected, in which case crop_image() passes frames through unchanged.
        """
        gray = cv2.cvtColor(reference_img, cv2.COLOR_BGR2GRAY)

        # Boost local contrast so the edge survives uneven illumination.
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        gray = clahe.apply(gray)

        binary = detect_vertical_lines(gray)

        right_edge = self.find_right_edge(binary)
        if right_edge is not None:
            self.reference_right_edge = right_edge
            # The reference frame defines the output size for every frame.
            self.target_height = reference_img.shape[0]
            self.target_width = right_edge  # right-edge column == output width

    def crop_image(self, img):
        """Crop *img* (BGR) at its detected right edge and normalise to the
        target size.

        Returns the cropped frame; the input unchanged when the cropper was
        never initialised; or None when no edge is found in this frame.
        """
        # Not initialised: pass the frame through untouched.
        if self.target_width is None or self.target_height is None:
            return img

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        gray = clahe.apply(gray)

        binary = detect_vertical_lines(gray)

        current_right_edge = self.find_right_edge(binary)
        if current_right_edge is None:
            return None

        # Keep everything left of the detected edge.
        cropped = img[:, :current_right_edge]

        # Force a uniform output size so the video writer gets equal frames.
        if cropped.shape[1] != self.target_width:
            cropped = cv2.resize(cropped, (self.target_width, self.target_height))

        return cropped

def detect_vertical_lines(gray_img):
    """Produce a binary mask emphasising vertical line structures in *gray_img*.

    Pipeline: horizontal Sobel gradient -> adaptive threshold -> vertical
    morphological close/open to join segments and remove small specks.
    """
    # A horizontal derivative responds strongly to vertical edges.
    grad_x = cv2.convertScaleAbs(cv2.Sobel(gray_img, cv2.CV_64F, 1, 0, ksize=5))

    # Adaptive thresholding copes with uneven illumination across the image.
    mask = cv2.adaptiveThreshold(
        grad_x, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 21, 2
    )

    # Tall, thin kernel: close gaps along vertical lines, then open to clean up.
    tall_kernel = np.ones((15, 1), np.uint8)
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, tall_kernel)
    return cv2.morphologyEx(mask, cv2.MORPH_OPEN, tall_kernel)

def process_single_image(args):
    """Pool worker: load one image from disk and crop it with the shared cropper.

    *args* is a ``(image_path, ImageCropper)`` tuple (a single argument so it
    works with ``Pool.imap``).  Returns the cropped frame, or None when the
    file cannot be read or any step fails.
    """
    img_path, cropper = args
    try:
        # fromfile + imdecode handles non-ASCII paths that cv2.imread chokes on.
        raw = np.fromfile(img_path, dtype=np.uint8)
        frame = cv2.imdecode(raw, cv2.IMREAD_COLOR)
        if frame is None:
            return None
        return cropper.crop_image(frame)
    except Exception as e:
        # Best-effort worker: report and skip rather than kill the pool.
        print(f"处理图片 {img_path} 时发生错误: {str(e)}")
        return None

def process_and_save_images(input_path, output_folder, sample_count=4000):
    """Sample up to *sample_count* BMP frames from *input_path*, crop each one
    with an ImageCropper initialised on the first frame, and save the results
    into *output_folder* as ``processed_NNNN.bmp``.

    Returns the number of successfully saved images.  Always returns an int
    (0 on any failure) so callers can safely test the result with ``> 0``.
    """
    try:
        # Collect the BMP frames that make up the sequence.
        image_files = [f for f in os.listdir(input_path) if f.lower().endswith('.bmp')]
        if not image_files:
            print(f"错误：在 {input_path} 中没有找到BMP文件")
            # BUGFIX: used to be a bare `return` (None), which made the
            # caller's `saved_count > 0` comparison raise a TypeError.
            return 0
        image_files.sort()

        total_images = len(image_files)
        if total_images < sample_count:
            sample_count = total_images

        # Evenly spaced indices so exactly sample_count frames are selected.
        indices = np.linspace(0, total_images - 1, sample_count, dtype=int)
        selected_files = [os.path.join(input_path, image_files[i]) for i in indices]

        print(f"总图像数: {total_images}")
        print(f"计划采样数: {sample_count}")
        print(f"实际采样数: {len(selected_files)}")

        # The first frame serves as the stabilisation reference.
        first_image_path = os.path.join(input_path, image_files[0])
        print(f"正在读取第一张图片: {first_image_path}")
        # fromfile + imdecode handles non-ASCII paths on Windows.
        reference_frame = cv2.imdecode(np.fromfile(first_image_path, dtype=np.uint8), cv2.IMREAD_COLOR)

        if reference_frame is None:
            print(f"错误：无法读取图片 {first_image_path}")
            return 0  # BUGFIX: was a bare `return` (None) — see above.

        # Create the cropper and lock in the target geometry.
        cropper = ImageCropper()
        cropper.initialize_with_reference(reference_frame)

        # Report the output geometry derived from the reference frame.
        first_processed = cropper.crop_image(reference_frame)
        if first_processed is None:
            # Defensive: should not happen, since the same frame initialised
            # the cropper; fall back to the raw frame for the size report.
            first_processed = reference_frame
        height, width = first_processed.shape[:2]
        print(f"处理后图像尺寸: {width}x{height}")

        os.makedirs(output_folder, exist_ok=True)

        # One (path, cropper) tuple per worker task.
        process_args = [(f, cropper) for f in selected_files]

        num_cores = multiprocessing.cpu_count()
        print(f"使用 {num_cores} 个CPU核心进行并行处理")

        # Crop all sampled frames in parallel, preserving input order (imap).
        with Pool(num_cores) as pool:
            processed_frames = list(tqdm(
                pool.imap(process_single_image, process_args),
                total=len(process_args),
                desc="处理图片"
            ))

        # Persist the successfully cropped frames.
        saved_count = 0
        print("正在保存处理后的图片...")
        for i, frame in enumerate(tqdm(processed_frames)):
            if frame is None:
                continue
            output_path = os.path.join(output_folder, f"processed_{i:04d}.bmp")
            try:
                # imencode + tofile handles non-ASCII output paths.
                _, img_encoded = cv2.imencode('.bmp', frame)
                img_encoded.tofile(output_path)
                saved_count += 1
            except Exception as e:
                print(f"保存图片 {output_path} 时发生错误: {str(e)}")
                continue

        print(f"成功处理并保存的图片数: {saved_count}")
        return saved_count

    except Exception as e:
        print(f"发生错误: {str(e)}")
        return 0

def create_video_from_images(input_folder, output_path, fps=30.0):
    """Assemble the processed BMP frames in *input_folder* into an AVI video.

    All frames are loaded first; if more than one frame size is found, every
    frame is resized to the most common size so the VideoWriter receives a
    uniform resolution.  Progress and statistics go to stdout.
    """
    try:
        # Gather the processed frames, in name order.
        bmp_names = [f for f in os.listdir(input_folder) if f.lower().endswith('.bmp')]
        if not bmp_names:
            print(f"错误：在 {input_folder} 中没有找到处理后的图片")
            return
        bmp_names.sort()

        print(f"找到 {len(bmp_names)} 个图片文件")

        # Load every frame up front and record the distinct sizes seen.
        print("正在检查所有图片尺寸...")
        loaded = []
        seen_sizes = set()
        for name in tqdm(bmp_names):
            full_path = os.path.join(input_folder, name)
            frame = cv2.imdecode(np.fromfile(full_path, dtype=np.uint8), cv2.IMREAD_COLOR)
            if frame is None:
                print(f"警告：无法读取图片 {full_path}")
                continue
            seen_sizes.add((frame.shape[1], frame.shape[0]))  # (width, height)
            loaded.append((name, frame))

        if len(seen_sizes) > 1:
            print(f"警告：检测到不同的图片尺寸: {seen_sizes}")
            # Vote for the most frequent size and normalise everything to it.
            tally = {}
            for _, frame in loaded:
                dims = (frame.shape[1], frame.shape[0])
                tally[dims] = tally.get(dims, 0) + 1

            common = max(tally.items(), key=lambda kv: kv[1])[0]
            print(f"将使用最常见的尺寸: {common[0]}x{common[1]}")

            print("正在调整图片尺寸...")
            resized = []
            for name, frame in tqdm(loaded):
                if (frame.shape[1], frame.shape[0]) != common:
                    frame = cv2.resize(frame, common)
                resized.append((name, frame))
            loaded = resized

        if not loaded:
            print("错误：没有可用的图片")
            return

        width, height = loaded[0][1].shape[1], loaded[0][1].shape[0]
        print(f"最终图片尺寸: {width}x{height}")

        # Open the writer with the XVID codec at the unified frame size.
        writer = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'XVID'), fps, (width, height))
        if not writer.isOpened():
            print("错误：无法创建视频写入器")
            return

        # Stream every frame into the video file.
        print("正在生成视频...")
        written = 0
        for _, frame in tqdm(loaded):
            writer.write(frame)
            written += 1

        writer.release()
        print("视频生成完成！")
        print(f"写入帧数: {written}")
        print(f"预计视频时长: {written/fps:.2f}秒")

    except Exception as e:
        print(f"发生错误: {str(e)}")
        import traceback
        traceback.print_exc()

if __name__ == '__main__':
    # Raw strings keep the Windows backslashes intact.
    source_dir = r"F:\李双成\B-1\200-4"
    stabilized_dir = r"F:\边缘处理结果"
    video_file = r"F:\test\output_video_stabilized_right.avi"

    # Stage 1: crop/stabilise the sampled frames and write them to disk.
    print("第一步：处理并保存图片...")
    frame_total = process_and_save_images(source_dir, stabilized_dir, sample_count=4000)

    # Stage 2: only build the video when stage 1 actually produced frames.
    if frame_total > 0:
        print("\n第二步：生成视频...")
        create_video_from_images(stabilized_dir, video_file)
