from torchvision import models
import torch.nn as nn
import torch
import torch.nn.functional as F
from model import resnet34


class ConvBlock(nn.Module):
    """Conv2d -> optional normalization -> activation -> optional dropout.

    Args:
        in_channels: number of input feature maps.
        out_channels: number of output feature maps.
        kernel_size: convolution kernel size.
        stride: convolution stride (default 1).
        padding: convolution padding (default 0).
        activation: activation module; ``None`` (the default) selects a
            fresh ``nn.ReLU()``. A module instance as the default value
            would be shared by every ConvBlock built with defaults, so
            the instance is created per block instead.
        normalization: normalization layer class called with the output
            channel count (e.g. ``nn.BatchNorm2d``), or ``None`` to skip.
        dropout_prob: dropout probability; values <= 0 disable dropout.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, activation=None, normalization=nn.BatchNorm2d, dropout_prob=0.1):
        super(ConvBlock, self).__init__()
        if activation is None:
            # Fresh activation per instance — avoids the shared mutable
            # default-argument pitfall of `activation=nn.ReLU()`.
            activation = nn.ReLU()
        layers = [nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding)]
        if normalization is not None:
            layers.append(normalization(out_channels))
        layers.append(activation)
        if dropout_prob > 0:
            layers.append(nn.Dropout(dropout_prob))
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        """Apply the block to a (B, C, H, W) tensor."""
        return self.block(x)



class RegressionCNN(nn.Module):
    """Tiny CNN regressing two values in (0, 1) for each RGB channel.

    Four conv+pool stages halve the spatial resolution each time; for a
    224x224 input this yields a (B, 3, 14, 14) map. The 196 spatial
    values of every channel then pass through two fully connected
    layers, giving a sigmoid-squashed (B, 3, 2) output.
    """

    def __init__(self):
        super(RegressionCNN, self).__init__()
        # Feature extractor: each stage is ConvBlock followed by 2x2 max-pool.
        self.conv_layers = nn.Sequential(
            ConvBlock(3, 4, kernel_size=3, padding=1),
            nn.MaxPool2d(kernel_size=2, stride=2),
            ConvBlock(4, 8, kernel_size=3, padding=1),
            nn.MaxPool2d(kernel_size=2, stride=2),
            ConvBlock(8, 4, kernel_size=3, padding=1),
            nn.MaxPool2d(kernel_size=2, stride=2),
            ConvBlock(4, 3, kernel_size=3, padding=1),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # 196 = 14 * 14 spatial positions per channel (assumes 224x224 input).
        self.fc1 = nn.Linear(196, 64)
        self.fc2 = nn.Linear(64, 2)

    def forward(self, x):
        """Map a (B, 3, 224, 224) image to a (B, 3, 2) tensor in (0, 1)."""
        feats = self.conv_layers(x)
        # Keep batch and channel axes; merge the spatial grid per channel.
        flat = feats.flatten(start_dim=2)
        hidden = F.relu(self.fc1(flat))
        # Sigmoid bounds each regressed value to (0, 1).
        return torch.sigmoid(self.fc2(hidden))



class HiLo_Transform(torch.nn.Module):
    """Split an image into high- and low-frequency components.

    A small CNN predicts two sigmoid-bounded widths per channel; these
    parameterize a Gaussian low-pass mask exp(-sx*dx^2 - sy*dy^2) in
    the centered Fourier domain. The high-pass mask is its complement.

    Args:
        M: expected image height.
        N: expected image width.
    """

    def __init__(self, M, N) -> None:
        super().__init__()
        # CNN predicting per-channel mask widths, shape (B, 3, 2).
        self.predict_sigma = RegressionCNN()
        crow, ccol = M // 2, N // 2
        # Negative squared distances from the spectrum center, built
        # vectorized instead of a Python double loop over M*N pixels.
        row = -(torch.arange(M, dtype=torch.float32) - crow) ** 2
        col = -(torch.arange(N, dtype=torch.float32) - ccol) ** 2
        pos = torch.stack(
            (row.unsqueeze(1).expand(M, N), col.unsqueeze(0).expand(M, N)),
            dim=-1,
        ).reshape(-1, 2)
        # Registered as a buffer so it follows the module across devices
        # (.cuda()/.to()) and into state_dict, instead of being pinned to
        # CUDA at construction time; buffers also carry no gradient.
        self.register_buffer("pos", pos)

    def forward(self, img):
        """Return (|high|, |low|) components of ``img``.

        Assumes img is (B, 3, M, N) with M, N matching the constructor
        arguments — the precomputed grid is sized for exactly that.
        """
        b, _, M, N = img.shape
        sigma = self.predict_sigma(img)      # (B, 3, 2)
        sigma = sigma.reshape(-1, 2)         # (B*3, 2)

        # Gaussian low-pass mask per channel; pos holds -dx^2 / -dy^2,
        # so sigma @ pos.T is the (negative) exponent of the Gaussian.
        low_pass = (sigma @ self.pos.T).reshape(b, 3, M, N).exp()
        high_pass = 1 - low_pass

        # Center the spectrum, apply each mask, then invert the shift
        # and the transform; magnitudes give real-valued images back.
        freq = torch.fft.fftshift(torch.fft.fft2(img))
        high = torch.fft.ifft2(torch.fft.ifftshift(high_pass * freq))
        low = torch.fft.ifft2(torch.fft.ifftshift(low_pass * freq))
        return torch.abs(high), torch.abs(low)




class Model(nn.Module):
    """Classifier over concatenated high/low-frequency ResNet features.

    The input is split by HiLo_Transform into high- and low-frequency
    images, each encoded by a shared resnet34 backbone (headless,
    presumably emitting (B, 512, 7, 7) — depends on the resnet34
    implementation). The two feature maps are concatenated on the
    channel axis, pooled back down with AdaptiveAvgPool3d, flattened,
    and classified by a three-layer MLP head.
    """

    def __init__(self, num_class=2):
        super().__init__()
        self.hilo_transform = HiLo_Transform(224, 224)
        # One encoder shared by both frequency branches.
        self.encoder = resnet34(num_classes=2, include_top=False)
        # Applied to a 4-D tensor, AdaptiveAvgPool3d treats (C, H, W) as a
        # volume: the 1024 concatenated channels are pooled back to 512.
        self.avgpool = nn.AdaptiveAvgPool3d((512, 7, 7))
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 1024),
            nn.ReLU(),
            nn.Linear(1024, 1024),
            nn.ReLU(),
            nn.Linear(1024, num_class)
        )

    def forward(self, x):
        """Return class logits of shape (B, num_class)."""
        high, low = self.hilo_transform(x)
        fused = torch.cat((self.encoder(high), self.encoder(low)), dim=1)
        pooled = self.avgpool(fused)
        # Flatten everything except the batch dimension for the MLP head.
        return self.classifier(pooled.view(pooled.size(0), -1))
    

if __name__ == '__main__':
    # Smoke test: push one random batch through the model on the GPU
    # and confirm the output shape.
    net = Model().cuda()
    dummy = torch.randn(10, 3, 224, 224).cuda()
    print(net(dummy).shape)
