import torch 
from PIL import Image
import numpy as np
import torch.nn as nn
from einops import rearrange, repeat, einsum
from einops.layers.torch import Rearrange
import torch.nn.functional as F


class ConvBlock(nn.Module):
    """Conv2d -> (optional) normalization -> activation -> (optional) dropout.

    Args:
        in_channels, out_channels, kernel_size, stride, padding: forwarded
            unchanged to ``nn.Conv2d``.
        activation: activation module instance, or ``None`` (default) for a
            fresh ``nn.ReLU()``. A module instance as the default argument
            would be created once and shared by every ``ConvBlock``, so the
            default activation is instantiated per block instead.
        normalization: normalization class (e.g. ``nn.BatchNorm2d``) applied
            over ``out_channels``, or ``None`` to skip normalization.
        dropout_prob: dropout probability; ``0`` disables the dropout layer.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, activation=None, normalization=nn.BatchNorm2d,
                 dropout_prob=0.1):
        super().__init__()
        layers = [nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding)]
        if normalization is not None:
            layers.append(normalization(out_channels))
        # Create the default activation here so each block owns its own module.
        layers.append(activation if activation is not None else nn.ReLU())
        if dropout_prob > 0:
            layers.append(nn.Dropout(dropout_prob))
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        """Run ``x`` through the conv/norm/activation/dropout stack."""
        return self.block(x)



class RegressionCNN(nn.Module):
    """Predict two sigmoid-bounded coefficients per input channel.

    For a (B, 3, 224, 224) input, returns a (B, 3, 2) tensor whose values
    lie in (0, 1).
    """

    def __init__(self):
        super().__init__()
        # Four conv/pool stages: spatial size 224 -> 112 -> 56 -> 28 -> 14,
        # channels 3 -> 8 -> 16 -> 8 -> 3.
        self.conv_layers = nn.Sequential(
            ConvBlock(3, 8, kernel_size=3, padding=1),
            nn.MaxPool2d(kernel_size=2, stride=2),
            ConvBlock(8, 16, kernel_size=3, padding=1),
            nn.MaxPool2d(kernel_size=2, stride=2),
            ConvBlock(16, 8, kernel_size=3, padding=1),
            nn.MaxPool2d(kernel_size=2, stride=2),
            ConvBlock(8, 3, kernel_size=3, padding=1),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # 196 = 14 * 14 spatial positions per channel — assumes 224x224 input.
        self.fc1 = nn.Linear(196, 64)
        self.fc2 = nn.Linear(64, 2)

    def forward(self, x):
        """x: (B, 3, 224, 224) -> (B, 3, 2), each value in (0, 1)."""
        feats = self.conv_layers(x)
        feats = feats.flatten(2)                 # (B, 3, 196): keep channel axis
        hidden = F.relu(self.fc1(feats))         # (B, 3, 64)
        # Sigmoid squashes each regression target into (0, 1).
        return torch.sigmoid(self.fc2(hidden))



class HiLo_Transform(torch.nn.Module):
    """Split an image batch into high- and low-frequency components.

    A small CNN predicts, per image and per channel, two coefficients
    (sigma); these parameterize a Gaussian low-pass mask
    ``exp(-sx*(x-crow)^2 - sy*(y-ccol)^2)`` over the centered 2-D spectrum.
    The complementary mask ``1 - low`` keeps the high frequencies.

    Args:
        M, N: spatial height/width of the images the module will receive.
    """

    def __init__(self, M, N) -> None:
        super().__init__()
        self.predict_sigma = RegressionCNN()
        crow, ccol = M // 2, N // 2
        # Vectorized replacement for the per-pixel double loop:
        # pos[x*N + y] = (-(x - crow)^2, -(y - ccol)^2).
        row_term = -(torch.arange(M, dtype=torch.float32) - crow) ** 2
        col_term = -(torch.arange(N, dtype=torch.float32) - ccol) ** 2
        pos = torch.stack(
            (
                row_term.unsqueeze(1).expand(M, N),
                col_term.unsqueeze(0).expand(M, N),
            ),
            dim=-1,
        ).reshape(-1, 2)
        # A (non-persistent) buffer follows the module through .to()/.cuda()
        # instead of being pinned to one device, never receives gradients,
        # and is excluded from the state_dict (matching the original tensor).
        self.register_buffer('pos', pos, persistent=False)

    def forward(self, img):
        """img: (B, 3, M, N) -> (|high|, |low|), real tensors of img's shape.

        NOTE(review): assumes the runtime (M, N) equals the sizes given at
        construction — ``self.pos`` is precomputed for that resolution.
        """
        b, _, M, N = img.shape
        sigma = self.predict_sigma(img)      # (B, 3, 2), values in (0, 1)
        sigma = sigma.reshape(-1, 2)         # (B*3, 2)

        # pos <= 0, so exp(sigma @ pos.T) is a Gaussian low-pass mask in [0, 1].
        low_pass = (sigma @ self.pos.T).reshape(b, 3, M, N).exp()
        high_pass = 1 - low_pass

        # Mask the centered spectrum, then invert the shift and the FFT.
        freq = torch.fft.fftshift(torch.fft.fft2(img))
        high = torch.fft.ifft2(torch.fft.ifftshift(high_pass * freq))
        low = torch.fft.ifft2(torch.fft.ifftshift(low_pass * freq))
        return torch.abs(high), torch.abs(low)


####################################################################
######################## Self-attention section ####################


class FeedForward(nn.Module):
    """Position-wise MLP: Linear -> GELU -> Dropout -> Linear -> Dropout."""

    def __init__(self, dim, hidden_dim, dropout=0.):
        super().__init__()
        stages = [
            nn.Linear(dim, hidden_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, dim),
            nn.Dropout(dropout),
        ]
        self.net = nn.Sequential(*stages)

    def forward(self, x):
        """Apply the MLP to the last dimension of ``x``."""
        return self.net(x)


class Attention(nn.Module):
    """Multi-head scaled dot-product attention over pre-split q/k/v tensors."""

    def __init__(self, dim, heads=8, dim_head=64, dropout=0.):
        super().__init__()
        inner_dim = dim_head * heads
        # A final projection is only needed when the concatenated heads do
        # not already match the model dimension.
        needs_projection = not (heads == 1 and dim_head == dim)
        self.heads = heads
        self.scale = dim_head ** -0.5
        self.attend = nn.Softmax(dim=-1)
        if needs_projection:
            self.to_out = nn.Sequential(
                nn.Linear(inner_dim, dim),
                nn.Dropout(dropout),
            )
        else:
            self.to_out = nn.Identity()

    def forward(self, q, k, v):
        """q/k/v: (batch, heads, tokens, dim_head) -> (batch, tokens, out_dim)."""
        # Scaled dot-product scores, equivalent to the einsum
        # 'b h i d, b h j d -> b h i j'.
        scores = torch.matmul(q, k.transpose(-1, -2)) * self.scale
        weights = self.attend(scores)
        context = torch.matmul(weights, v)           # 'b h i j, b h j d -> b h i d'
        b, h, n, d = context.shape
        # Merge the head axis back into features: 'b h n d -> b n (h d)'.
        context = context.transpose(1, 2).reshape(b, n, h * d)
        return self.to_out(context)


class to_qkv(nn.Module):
    """Project tokens to queries/keys/values and split them into heads."""

    def __init__(self, dim, inner_dim, heads) -> None:
        super().__init__()
        self.heads = heads
        # One fused, bias-free projection producing q, k and v at once.
        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias=False)

    def forward(self, x):
        """x: (batch, tokens, dim) -> three (batch, heads, tokens, head_dim) tensors."""
        b, n, _ = x.shape
        h = self.heads

        def split_heads(t):
            # Equivalent to rearrange 'b n (h d) -> b h n d'.
            return t.view(b, n, h, -1).transpose(1, 2)

        q, k, v = (split_heads(t) for t in self.to_qkv(x).chunk(3, dim=-1))
        return q, k, v


class Encoder(nn.Module):
    """VGG-style feature extractor: 3 -> 768 channels, 5 halving pool stages.

    For a (B, 3, H, W) input the output is (B, 768, H/32, W/32).
    """

    # 'M' marks a 2x2 max-pool; integers are 3x3 conv output widths.
    _CONFIG = (64, 64, 'M',
               128, 128, 'M',
               256, 256, 256, 'M',
               512, 512, 512, 'M',
               512, 512, 768, 'M')

    def __init__(self, ) -> None:
        super().__init__()
        layers = []
        in_ch = 3
        for item in self._CONFIG:
            if item == 'M':
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                layers.append(nn.Conv2d(in_ch, item, kernel_size=3,
                                        stride=1, padding=1))
                layers.append(nn.ReLU(inplace=True))
                in_ch = item
        self.encoder = nn.Sequential(*layers)

    def forward(self, x):
        """Encode an image batch into a 768-channel feature map."""
        return self.encoder(x)


class Transformer(nn.Module):
    """Cross-band fusion of high- and low-frequency feature maps.

    Each band is encoded by its own CNN into 768 channels, split into
    (q, k, v) thirds of 256 channels along the channel axis, and fused by
    element-wise products in which each band is queried by the other one.
    The concatenated result has 512 channels.
    """

    def __init__(self):
        super().__init__()
        self.hi_encoder = Encoder()
        self.lo_encoder = Encoder()

    def forward(self, hi, lo):
        hi_feat = self.hi_encoder(hi)
        lo_feat = self.lo_encoder(lo)
        hi_q, hi_k, hi_v = hi_feat.chunk(3, dim=1)
        lo_q, lo_k, lo_v = lo_feat.chunk(3, dim=1)
        # Cross gating: the low-frequency query weights the high band,
        # and vice versa.
        fused_hi = lo_q * hi_k * hi_v
        fused_lo = hi_q * lo_k * lo_v
        return torch.cat((fused_hi, fused_lo), dim=1)


############## High/low-frequency attention section ################
####################################################################


class Hilo(nn.Module):
    """Frequency-decomposition classifier for 224x224 RGB images.

    Pipeline: split the input into high-/low-frequency bands, encode and
    fuse them into a 512-channel map, then pool and classify into 2 logits.
    """

    def __init__(self):
        super().__init__()
        self.hilo_transform = HiLo_Transform(224, 224)
        self.encoder = Transformer()
        # Global average pool followed by a small MLP head: 512 -> 128 -> 32 -> 2.
        self.classification_layer = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, 1)),
            nn.Flatten(start_dim=1),
            nn.Linear(512, 128),
            nn.Dropout(0.1),
            nn.Linear(128, 32),
            nn.Dropout(0.1),
            nn.Linear(32, 2),
        )

    def forward(self, x):
        """x: (B, 3, 224, 224) -> logits of shape (B, 2)."""
        high_band, low_band = self.hilo_transform(x)
        fused = self.encoder(high_band, low_band)
        return self.classification_layer(fused)


if __name__ == '__main__':
    # Smoke test: push one random batch through the model and report the
    # output shape. Prefer the GPU but fall back to the CPU when absent,
    # instead of unconditionally calling .cuda().
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = Hilo().to(device)
    x = torch.randn(10, 3, 224, 224, device=device)
    y = model(x)
    print(y.shape)
