import cv2
import numpy as np
from tqdm import tqdm
import torch
import torchvision.transforms.functional as F
from torchvision import transforms

# ---------------------------------------------------------------------------
# Remove a fixed-position watermark from a video: OpenCV TELEA inpainting
# followed by a Gaussian-diffusion smoothing pass implemented in PyTorch.
# ---------------------------------------------------------------------------

# Watermark bounding box (y1, y2, x1, x2), inclusive pixel coordinates.
WATERMARK_REGION = (411, 481, 652, 909)

INPUT_PATH = 'Y:/MP4/1.mp4'
OUTPUT_PATH = 'Y:/MP4/1_no_watermark.mp4'

# 5x5 binomial (Gaussian) kernel, normalized to sum to 1 per channel and
# replicated for a grouped convolution over the 3 color channels.
# Built once at module level instead of once per frame (it is loop-invariant).
_GAUSS_KERNEL = (torch.tensor([
    [1, 4, 6, 4, 1],
    [4, 16, 24, 16, 4],
    [6, 24, 36, 24, 6],
    [4, 16, 24, 16, 4],
    [1, 4, 6, 4, 1],
], dtype=torch.float32) / 256.0).view(1, 1, 5, 5).repeat(3, 1, 1, 1)


def build_mask(shape, region):
    """Return a uint8 mask: 255 inside *region*, 0 elsewhere.

    shape:  (height, width) of the frame.
    region: (y1, y2, x1, x2), inclusive coordinates — same pixels that
            cv2.rectangle((x1, y1), (x2, y2), 255, -1) would fill.
    """
    y1, y2, x1, x2 = region
    mask = np.zeros(shape, np.uint8)
    mask[y1:y2 + 1, x1:x2 + 1] = 255  # +1 because both corners are inclusive
    return mask


def diffuse_fill(frame_t, mask_t, passes=5):
    """Fill the masked region of *frame_t* by repeated Gaussian diffusion.

    frame_t: float tensor (1, 3, H, W), values in [0, 255].
    mask_t:  float tensor (1, 1, H, W), values in {0.0, 1.0} (1 = hole).
    passes:  number of blur iterations; more passes = smoother fill.

    Returns a tensor of the same shape. Pixels outside the mask are
    returned unchanged; pixels inside are replaced by diffused neighbors.
    """
    # Zero out the hole, then let repeated blurs bleed surrounding pixels in.
    filled = frame_t * (1.0 - mask_t)
    for _ in range(passes):
        filled = torch.nn.functional.conv2d(
            filled, _GAUSS_KERNEL, padding=2, groups=3)
    # Composite: diffused values inside the hole, original values outside.
    return filled * mask_t + frame_t * (1.0 - mask_t)


def remove_watermark(frame, mask):
    """Remove the watermark from one BGR uint8 frame; return a uint8 frame.

    Bug fixes vs. the original script:
    * the mask was used raw (0/255), so ``1 - mask`` was -254 inside the
      watermark and both the zeroing step and the final blend produced
      garbage — the mask is now normalized to {0, 1};
    * the result was divided by its global max before ``.byte()``, which
      collapsed the whole frame to near-black — replaced by a clamp to
      the valid [0, 255] range (the kernel sums to 1, so no rescale is
      needed).
    """
    # First pass: classical TELEA inpainting over the masked region.
    dst = cv2.inpaint(frame, mask, 3, cv2.INPAINT_TELEA)

    # HWC uint8 -> (1, 3, H, W) float in [0, 255]. Pure torch; equivalent
    # to F.to_tensor(dst) * 255 without the torchvision dependency.
    frame_t = torch.from_numpy(dst).permute(2, 0, 1).float().unsqueeze(0)
    mask_t = (torch.from_numpy(mask).float() / 255.0).unsqueeze(0).unsqueeze(0)

    smoothed = diffuse_fill(frame_t, mask_t)
    result = smoothed.clamp(0.0, 255.0).squeeze(0).permute(1, 2, 0).byte()
    # .contiguous(): VideoWriter expects a dense HWC array.
    return result.contiguous().numpy()


def main():
    """Read INPUT_PATH, scrub the watermark frame by frame, write OUTPUT_PATH."""
    cap = cv2.VideoCapture(INPUT_PATH)
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    print(f'frame_width:{frame_width},frame_height:{frame_height},fps:{fps}')

    # Output encoder settings.
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(OUTPUT_PATH, fourcc, fps, (frame_width, frame_height))

    # The watermark position is fixed, so the mask is built once, not per frame.
    mask = build_mask((frame_height, frame_width), WATERMARK_REGION)

    try:
        # tqdm shows a progress bar over the known frame count.
        with tqdm(total=total_frames, desc="Processing Video") as pbar:
            while cap.isOpened():
                ret, frame = cap.read()
                if not ret:
                    break
                out.write(remove_watermark(frame, mask))
                pbar.update(1)
    finally:
        # Release handles even if processing fails mid-stream.
        cap.release()
        out.release()


if __name__ == '__main__':
    main()