import torch
import torch.nn as nn
from FFC import FFCResnetBlock

class DepthwiseSeparableConv(nn.Module):
    """Depthwise-separable convolution: a per-channel spatial conv followed by
    a 1x1 pointwise conv that mixes channels.

    Fix: the ``bias`` flag was previously applied only to the pointwise conv,
    leaving the depthwise conv with its implicit default ``bias=True`` even
    when ``bias=False`` was requested. It now controls both convolutions.

    Args:
        in_channels: number of input channels.
        out_channels: number of output channels.
        kernel_size, stride, padding: spatial parameters of the depthwise conv.
        bias: whether both convolutions carry a bias term.
    """
    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False):
        super(DepthwiseSeparableConv, self).__init__()
        # groups=in_channels -> one independent spatial filter per channel (depthwise).
        self.dwconv = nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size,
                                stride=stride, padding=padding, groups=in_channels, bias=bias)
        # 1x1 conv mixes information across channels (pointwise).
        self.pwconv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=bias)

    def forward(self, x):
        x = self.dwconv(x)
        x = self.pwconv(x)
        return x

class upsample_block(nn.Module):
    """2x spatial upsampling stage.

    Transposed conv (kernel 4, stride 2, pad 1) doubles the spatial size,
    then BatchNorm + ReLU, followed by an FFC residual block for global
    (spectral) context mixing.
    """
    def __init__(self, inp_dim, oup_dim):
        super().__init__()
        stages = [
            nn.ConvTranspose2d(inp_dim, oup_dim, kernel_size=4, stride=2,
                               padding=1, output_padding=0),
            nn.BatchNorm2d(oup_dim),
            nn.ReLU(inplace=True),
            FFCResnetBlock(oup_dim, enable_lfu=False),
        ]
        self.layer = nn.Sequential(*stages)

    def forward(self, X):
        return self.layer(X)

class downsample_block(nn.Module):
    """Halves spatial resolution.

    A stride-1 conv changes the channel count at full resolution, then a
    stride-2 conv performs the downsampling; each conv is followed by
    BatchNorm + ReLU (convs are bias-free since BatchNorm absorbs the shift).
    """
    def __init__(self, inp_dim, oup_dim):
        super().__init__()
        stages = []
        # Channel projection at full resolution.
        stages += [
            nn.Conv2d(inp_dim, oup_dim, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(oup_dim),
            nn.ReLU(inplace=True),
        ]
        # Stride-2 conv does the actual 2x downsampling.
        stages += [
            nn.Conv2d(oup_dim, oup_dim, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(oup_dim),
            nn.ReLU(inplace=True),
        ]
        self.layer = nn.Sequential(*stages)

    def forward(self, X):
        return self.layer(X)

    
class Encoder(nn.Module):
    """Convolutional VAE encoder.

    Maps a 3-channel image to the mean and log-std of a 1024-d Gaussian
    posterior: a stem conv, five stride-2 stages (32x total reduction),
    global average pooling, then two linear heads.
    """
    def __init__(self):
        super().__init__()
        self.layers = nn.Sequential(
            nn.Conv2d(3, 32, 7, 1, 3, bias=False),
            downsample_block(32, 64),
            downsample_block(64, 128),
            downsample_block(128, 256),
            downsample_block(256, 512),
            downsample_block(512, 1024),
            nn.AdaptiveAvgPool2d(output_size=(1, 1)),
            nn.Flatten()
        )

        # Posterior parameter heads.
        self.m_layer = nn.Linear(1024, 1024)  # mean
        self.e_layer = nn.Linear(1024, 1024)  # log standard deviation

        self.weight_init()

    def weight_init(self):
        """DCGAN-style init: conv weights ~ N(0, 0.02), BatchNorm scale ~ N(1, 0.02).

        Fixes two defects in the previous version: the encoder contains
        nn.Conv2d (not nn.ConvTranspose2d) layers, so the old isinstance
        check never initialized any conv; and BatchNorm weights were drawn
        from N(0, 0.02), which nearly zeros all activations — the DCGAN
        convention centers them at 1.0.
        """
        for n in self.modules():
            if isinstance(n, (nn.Conv2d, nn.ConvTranspose2d)):
                nn.init.normal_(n.weight.data, 0, 0.02)

            elif isinstance(n, nn.BatchNorm2d):
                nn.init.normal_(n.weight.data, 1.0, 0.02)
                nn.init.constant_(n.bias.data, 0)

    def forward(self, X):
        """Return (mean, log_std) of the latent Gaussian, each of shape (B, 1024)."""
        Z = self.layers(X)
        mean = self.m_layer(Z)
        log_std = self.e_layer(Z)
        return mean, log_std


class Decoder(nn.Module):
    """Convolutional VAE decoder.

    Reshapes a 1024-d latent vector to (1024, 1, 1) and applies seven 2x
    upsampling stages (spatial 1 -> 128), then a final conv + Sigmoid to
    produce a 3-channel image in [0, 1].
    """
    def __init__(self):
        super().__init__()
        self.layers = nn.Sequential(
            upsample_block(1024, 512),     # 1  -> 2
            upsample_block(512, 256),      # 2  -> 4
            upsample_block(256, 128),      # 4  -> 8
            upsample_block(128, 64),       # 8  -> 16
            upsample_block(64, 32),        # 16 -> 32
            upsample_block(32, 16),        # 32 -> 64
            upsample_block(16, 8),         # 64 -> 128
            nn.Conv2d(8, 3, 3, 1, 1),
            nn.Sigmoid()
        )

        self.weight_init()

    def weight_init(self):
        """DCGAN-style init: conv weights ~ N(0, 0.02), BatchNorm scale ~ N(1, 0.02).

        Fix: BatchNorm weights were drawn from N(0, 0.02), which nearly zeros
        all activations at init — the DCGAN convention centers the scale at
        1.0. Also initializes the final nn.Conv2d, which the old
        ConvTranspose2d-only check skipped.
        """
        for n in self.modules():
            if isinstance(n, (nn.Conv2d, nn.ConvTranspose2d)):
                nn.init.normal_(n.weight.data, 0, 0.02)

            elif isinstance(n, nn.BatchNorm2d):
                nn.init.normal_(n.weight.data, 1.0, 0.02)
                nn.init.constant_(n.bias.data, 0)

    def forward(self, X: torch.Tensor):
        """Decode latent codes X of shape (B, 1024) into (B, 3, 128, 128) images."""
        return self.layers(X.view(-1, 1024, 1, 1))
    
class VAE(nn.Module):
    """Variational autoencoder combining the Encoder and Decoder defined above."""

    def __init__(self):
        super().__init__()
        self.encoder = Encoder()
        self.decoder = Decoder()

    def forward(self, X):
        """Encode, sample via the reparameterization trick, and decode.

        Returns the reconstruction and the posterior parameters (mean, log_std).
        """
        mean, log_std = self.encoder(X)
        std = torch.exp(log_std)
        noise = torch.randn_like(log_std, device=log_std.device)
        embed = mean + std * noise
        return self.decoder(embed), (mean, log_std)

    def pred(self, X):
        """Deterministic reconstruction: decode the posterior mean (no sampling)."""
        mean, _ = self.encoder(X)
        return self.decoder(mean)

class Discriminator(nn.Module):
    """GAN discriminator: six stride-2 conv stages with LeakyReLU, global
    average pooling, then a two-layer MLP producing a single real/fake logit."""

    def __init__(self):
        super().__init__()
        # (in_channels, out_channels) for each downsampling stage.
        channel_plan = [(3, 64), (64, 128), (128, 256), (256, 512), (512, 512), (512, 512)]
        stages = [self.conv_block(c_in, c_out, 5, 2, 1) for c_in, c_out in channel_plan]
        self.net = nn.Sequential(
            *stages,
            nn.AdaptiveAvgPool2d((1, 1)),
            nn.Flatten()
        )
        self.fc = nn.Sequential(
            nn.Linear(512, 1024),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(1024, 1)
        )

    def conv_block(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1):
        """Bias-free conv + LeakyReLU(0.2); normalization intentionally omitted."""
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, bias=False),
            nn.LeakyReLU(0.2, inplace=True)
        )

    def forward(self, x):
        """Return a (B, 1) logit for a batch of input images."""
        features = self.net(x)
        return self.fc(features)



