import cv2
import numpy as np
import scipy.fftpack as fftpack
import scipy.fft as fft



# Inverse filtering
def inverse_filter(image, kernel, noise_power):
    """Restore a degraded image by regularized inverse filtering.

    The degradation is inverted in the frequency domain:

        F_hat = G / (H + noise_power)

    where ``noise_power`` is a constant added to the degradation transfer
    function H to keep the division stable near its zeros.

    Parameters
    ----------
    image : 2-D ndarray
        Degraded (blurred) grayscale image.
    kernel : 2-D ndarray
        Degradation kernel (PSF).  It may be smaller than the image; it is
        zero-padded to the image size by the FFT.
    noise_power : float
        Non-zero regularization constant added to H.

    Returns
    -------
    ndarray of uint8
        Restored image, clipped to [0, 255].

    Raises
    ------
    ValueError
        If ``noise_power`` is zero, or the kernel is larger than the image.
    """
    if noise_power == 0:
        raise ValueError("噪声功率不能为零")

    # Generalized: the kernel no longer has to match the image size exactly
    # (fft2 zero-pads it below); it only must not exceed the image.
    if kernel.shape[0] > image.shape[0] or kernel.shape[1] > image.shape[1]:
        raise ValueError("图像和核的尺寸不匹配")

    G = np.fft.fft2(image)
    # Zero-pad the kernel to the full image size before transforming.
    H = np.fft.fft2(kernel, s=image.shape)

    # Regularize so we never divide by a (near-)zero frequency component.
    H_reg = H + noise_power

    result = np.fft.ifft2(G / H_reg).real

    # Clip to the displayable range and convert to 8-bit.
    return np.clip(result, 0, 255).astype(np.uint8)


# Wiener filtering
def wiener_filter(image, kernel, K=0.01):
    """Restore *image* with a Wiener filter.

    Applies F_hat = conj(H) / (|H|^2 + K) * G in the frequency domain,
    where K is the constant noise-to-signal power ratio and H is the FFT
    of the (zero-padded) degradation kernel.
    """
    spectrum = fft.fft2(image)
    transfer = fft.fft2(kernel, s=image.shape)
    # Wiener restoration term: conj(H) / (|H|^2 + K).
    denom = np.abs(transfer) ** 2 + K
    restored_spectrum = (np.conj(transfer) / denom) * spectrum
    restored = fft.ifft2(restored_spectrum).real
    return np.clip(restored, 0, 255).astype(np.uint8)



# Constrained least-squares filtering
def constrained_least_squares_filter(image, kernel, gamma=0.01):
    """Restore *image* by constrained least-squares (Tikhonov) filtering.

    Applies F_hat = conj(H) / (|H|^2 + gamma * |P|^2) * G in the frequency
    domain, where P is the discrete Laplacian acting as the smoothness
    constraint and gamma controls its weight.
    """
    rows, cols = image.shape
    G = fft.fft2(image)
    H = fft.fft2(kernel, s=(rows, cols))
    # Discrete Laplacian operator, zero-padded to the image size.
    laplacian = np.array([[0, -1, 0],
                          [-1, 4, -1],
                          [0, -1, 0]])
    P = fft.fft2(laplacian, s=(rows, cols))
    denominator = np.abs(H) ** 2 + gamma * np.abs(P) ** 2
    F_hat = (np.conj(H) / denominator) * G
    restored = fft.ifft2(F_hat).real
    return np.clip(restored, 0, 255).astype(np.uint8)



# Iterative blind deconvolution (IBD)
def _ibd_restore_channel(channel, kernel_size, iterations):
    """Run the IBD update loop on a single channel.

    Starts from a normalized Gaussian PSF and iteratively refines it from
    the residual between the channel and its current blurred estimate.
    Returns the restored channel as uint8.
    """
    channel = channel.astype(np.float32)
    # Initialize the PSF with a normalized Gaussian (a reasonable prior).
    psf = cv2.getGaussianKernel(kernel_size, -1) * cv2.getGaussianKernel(kernel_size, -1).T
    psf = psf / np.sum(psf)
    half = kernel_size // 2
    for _ in range(iterations):
        deblurred = cv2.filter2D(channel, -1, psf)
        error = channel - deblurred
        # Correlate the residual with the current estimate.  The result is
        # image-sized; only its central kernel-sized patch is a valid PSF
        # gradient.  (The original code added the full image-sized map to
        # the PSF, which raised a broadcast error for any image larger than
        # the kernel.)
        grad = cv2.filter2D(error, -1, np.flipud(np.fliplr(deblurred)))
        gr, gc = grad.shape[0] // 2, grad.shape[1] // 2
        psf += grad[gr - half: gr - half + kernel_size,
                    gc - half: gc - half + kernel_size]
        if np.isnan(psf).any() or np.isinf(psf).any():
            print("PSF出现异常值，停止迭代")
            break
        total = np.sum(psf)
        if total == 0:
            # Degenerate PSF: keep the last valid (normalized) estimate.
            break
        psf = psf / total
    result = cv2.filter2D(channel, -1, psf)
    return np.clip(result, 0, 255).astype(np.uint8)


def iterative_blind_deconvolution(image, kernel_size, iterations=10):
    """Restore *image* by iterative blind deconvolution.

    Parameters
    ----------
    image : ndarray
        Grayscale (2-D) or color (3-D) image.  Color images are restored
        channel by channel.
    kernel_size : int
        Side length of the square PSF estimate; should not exceed the
        image dimensions.
    iterations : int, optional
        Number of PSF refinement iterations (default 10).

    Returns
    -------
    ndarray of uint8
        Restored image with the same number of channels as the input.
    """
    if len(image.shape) == 3:  # color: restore each channel independently
        channels = cv2.split(image)
        restored = [_ibd_restore_channel(c, kernel_size, iterations) for c in channels]
        return cv2.merge(restored)
    # grayscale
    return _ibd_restore_channel(image, kernel_size, iterations)

# 凸集投影（POCS）
def projection_onto_convex_sets(image, kernel, iterations=10):
    rows, cols = image.shape
    f = fftpack.fft2(image)
    h = fftpack.fft2(kernel, s=(rows, cols))
    H_conj = np.conj(h)
    H_squared = np.abs(h) ** 2
    F_hat = f
    for _ in range(iterations):
        G = F_hat * h
        g = np.real(fftpack.ifft2(G))
        g = np.clip(g, 0, 255)
        G_prime = fftpack.fft2(g)
        F_hat = F_hat + (H_conj / H_squared) * (G_prime - G)
    result = np.real(fftpack.ifft2(F_hat))
    return np.clip(result, 0, 255).astype(np.uint8)