import pywt
import torch
from torch import nn
import torch.nn.functional as F

# Normalize the three high-frequency subband weights and ensure they sum to 1.

def create_wavelet_filter(wave, in_size, out_size, type=torch.float):
    """Build 2D DWT analysis/synthesis filter banks for grouped convolution.

    Args:
        wave: pywt wavelet name (e.g. 'db1').
        in_size: number of input channels; decomposition filters are tiled
            once per channel for a grouped conv2d.
        out_size: number of channels the reconstruction filters are tiled for.
        type: torch dtype of the filters. (Name kept for backward
            compatibility with keyword callers, although it shadows the
            builtin ``type``.)

    Returns:
        Tuple ``(dec_filters, rec_filters)`` with shapes
        ``(4 * in_size, 1, k, k)`` and ``(4 * out_size, 1, k, k)``,
        subband order LL, LH, HL, HH.
    """
    w = pywt.Wavelet(wave)
    # Reverse the 1D decomposition taps so that conv2d (which computes a
    # cross-correlation) implements the wavelet convolution.
    dec_hi = torch.tensor(w.dec_hi[::-1], dtype=type)
    dec_lo = torch.tensor(w.dec_lo[::-1], dtype=type)
    # Outer products of the 1D taps give the four separable 2D subband
    # filters: LL, LH, HL, HH.
    dec_filters = torch.stack([dec_lo.unsqueeze(0) * dec_lo.unsqueeze(1),
                               dec_lo.unsqueeze(0) * dec_hi.unsqueeze(1),
                               dec_hi.unsqueeze(0) * dec_lo.unsqueeze(1),
                               dec_hi.unsqueeze(0) * dec_hi.unsqueeze(1)], dim=0)
    dec_filters = dec_filters[:, None].repeat(in_size, 1, 1, 1)

    # The original code built these as ``tensor(taps[::-1]).flip(dims=[0])``;
    # a list reversal followed by a flip is an identity, so use the
    # reconstruction taps directly (result is byte-identical).
    rec_hi = torch.tensor(w.rec_hi, dtype=type)
    rec_lo = torch.tensor(w.rec_lo, dtype=type)
    rec_filters = torch.stack([rec_lo.unsqueeze(0) * rec_lo.unsqueeze(1),
                               rec_lo.unsqueeze(0) * rec_hi.unsqueeze(1),
                               rec_hi.unsqueeze(0) * rec_lo.unsqueeze(1),
                               rec_hi.unsqueeze(0) * rec_hi.unsqueeze(1)], dim=0)
    rec_filters = rec_filters[:, None].repeat(out_size, 1, 1, 1)

    return dec_filters, rec_filters

def wavelet_transform(x, filters):
    """Single-level grouped 2D DWT.

    Applies the stacked subband filters as a depthwise stride-2 convolution
    and splits the result back into its four subbands.

    Args:
        x: input of shape (B, C, H, W).
        filters: filter bank of shape (4*C, 1, k, k) from
            ``create_wavelet_filter``.

    Returns:
        Tensor of shape (B, C, 4, H//2, W//2); index 2 orders the subbands
        as LL, LH, HL, HH.
    """
    batch, channels, height, width = x.shape
    kernel_h, kernel_w = filters.shape[2], filters.shape[3]
    # Padding keeps the stride-2 output at exactly half resolution.
    padding = (kernel_h // 2 - 1, kernel_w // 2 - 1)
    out = F.conv2d(x, filters, stride=2, groups=channels, padding=padding)
    return out.reshape(batch, channels, 4, height // 2, width // 2)

def inverse_wavelet_transform(x, filters):
    """Single-level grouped inverse 2D DWT.

    Merges the four subbands per channel and reconstructs the full-resolution
    signal with a grouped transposed convolution.

    Args:
        x: subband tensor of shape (B, C, 4, H/2, W/2).
        filters: reconstruction bank of shape (4*C, 1, k, k).

    Returns:
        Tensor of shape (B, C, H, W).
    """
    batch, channels = x.shape[0], x.shape[1]
    half_h, half_w = x.shape[3], x.shape[4]
    padding = (filters.shape[2] // 2 - 1, filters.shape[3] // 2 - 1)
    # Fold the subband axis into the channel axis for the grouped transpose conv.
    merged = x.reshape(batch, channels * 4, half_h, half_w)
    return F.conv_transpose2d(merged, filters, stride=2, groups=channels, padding=padding)

class HighFreqEnhancedWTConv2d(nn.Module):
    """Conv block that enhances the high-frequency wavelet subbands.

    The input is decomposed with a single-level DWT; the LL subband goes
    through one convolution, while each high-frequency subband (LH, HL, HH)
    goes through two convolutions (ReLU in between) and is scaled by a
    softmax-normalized learnable weight before the inverse DWT reassembles
    the output at the original resolution.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, wt_levels=1, wt_type='db1'):
        super(HighFreqEnhancedWTConv2d, self).__init__()
        # Depthwise-style design: input and output widths must match.
        assert in_channels == out_channels

        self.in_channels = in_channels
        # NOTE(review): stored but never read in forward() — only a single
        # decomposition level is performed regardless of this value.
        self.wt_levels = wt_levels

        # Fixed (non-trainable) analysis / synthesis filter banks.
        dec_bank, rec_bank = create_wavelet_filter(wt_type, in_channels, in_channels, torch.float)
        self.wt_filter = nn.Parameter(dec_bank, requires_grad=False)
        self.iwt_filter = nn.Parameter(rec_bank, requires_grad=False)

        # Convolution applied to the low-frequency (LL) subband.
        self.low_freq_conv = nn.Conv2d(in_channels, in_channels, kernel_size, padding='same', bias=True)

        # Two convolution stages, one conv per high-frequency subband each.
        self.high_freq_convs_1 = nn.ModuleList(
            nn.Conv2d(in_channels, in_channels, kernel_size, padding='same', bias=True)
            for _ in range(3)
        )
        self.high_freq_convs_2 = nn.ModuleList(
            nn.Conv2d(in_channels, in_channels, kernel_size, padding='same', bias=True)
            for _ in range(3)
        )

        # Learnable subband weights; softmax in forward() keeps their sum at 1.
        self.alpha_lh = nn.Parameter(torch.tensor(0.1), requires_grad=True)
        self.alpha_hl = nn.Parameter(torch.tensor(0.1), requires_grad=True)
        self.alpha_hh = nn.Parameter(torch.tensor(0.1), requires_grad=True)

    def forward(self, x):
        """Decompose, enhance the high-frequency subbands, reconstruct.

        Args:
            x: tensor of shape (B, C, H, W); H and W should be even so the
               inverse transform restores the input resolution exactly.

        Returns:
            Tensor of shape (B, C, H, W).
        """
        # Single-level DWT -> (B, C, 4, H/2, W/2), subband order LL, LH, HL, HH.
        subbands = wavelet_transform(x, self.wt_filter)
        low = subbands[:, :, 0, :, :]
        highs = [subbands[:, :, k, :, :] for k in (1, 2, 3)]

        # Low-frequency path: a single convolution.
        low = self.low_freq_conv(low)

        # High-frequency path: conv + ReLU, then a second conv.
        enhanced = [
            conv2(F.relu(conv1(band)))
            for band, conv1, conv2 in zip(highs, self.high_freq_convs_1, self.high_freq_convs_2)
        ]

        # Softmax over the three alphas guarantees the weights sum to 1.
        weights = torch.softmax(torch.stack([self.alpha_lh, self.alpha_hl, self.alpha_hh]), dim=0)
        fused = [w * band for w, band in zip(weights, enhanced)]

        # Reassemble the subband stack and invert the transform.
        recombined = torch.stack([low] + fused, dim=2)
        return inverse_wavelet_transform(recombined, self.iwt_filter)

# Smoke test: the reconstructed output must keep the input's shape.
if __name__ == '__main__':
    block = HighFreqEnhancedWTConv2d(in_channels=3, out_channels=3)
    # Renamed from `input`/`output` to avoid shadowing the builtin `input`.
    x = torch.rand(4, 3, 480, 640)
    y = block(x)
    print(x.size())
    print(y.size())
