from torch import nn
import torch
import parameters as p


class GeneratorLayer(nn.Module):
    """One upsampling stage of the generator: ConvTranspose2d -> norm -> activation.

    With the default kernel_size=4, stride=2, padding=1 the transposed
    convolution exactly doubles the spatial resolution.
    """

    def __init__(self,
                 in_channels, out_channels, factor: int = 2,
                 use_norm=True, activation='relu',
                 stride=2, padding=1,
                 *args, **kwargs) -> None:
        """Build the stage.

        Args:
            in_channels: channels of the incoming feature map.
            out_channels: channels produced by the transposed convolution.
            factor: unused; kept only for backward compatibility with callers.
            use_norm: apply BatchNorm2d after the convolution when True.
            activation: 'relu' (case-insensitive) selects ReLU; any other
                value falls back to Tanh (used for the final RGB stage).
            stride: stride of the transposed convolution.
            padding: padding of the transposed convolution.
        """
        super().__init__(*args, **kwargs)
        # bias=False because the (optional) BatchNorm has its own affine shift.
        self.conv = nn.ConvTranspose2d(
            in_channels, out_channels, stride=stride, kernel_size=4, padding=padding, bias=False)
        # This is BatchNorm despite the "In" name; the attribute name is kept
        # unchanged so existing checkpoints (state_dict keys) still load.
        self.In = nn.BatchNorm2d(
            out_channels) if use_norm else nn.Identity()
        self.act = nn.ReLU() if activation.lower() == 'relu' else nn.Tanh()

    def forward(self, x):
        return self.act(self.In(self.conv(x)))


class DiscriminatorLayer(nn.Module):
    """One downsampling stage of the discriminator: Conv2d -> norm -> LeakyReLU.

    With kernel_size=6, stride=2, padding=2 the convolution halves the
    spatial resolution for even input sizes.
    """

    def __init__(self,
                 in_channels, out_channels,
                 use_norm=True, use_activation=True,
                 stride=2, padding=2,
                 *args, **kwargs) -> None:
        """Build the stage.

        Args:
            in_channels: channels of the incoming feature map.
            out_channels: channels produced by the convolution.
            use_norm: apply InstanceNorm2d after the convolution when True.
            use_activation: apply LeakyReLU when True, Identity otherwise.
            stride: stride of the convolution.
            padding: padding of the convolution.
        """
        super().__init__(*args, **kwargs)

        # bias=False because the (optional) norm layer provides the shift.
        self.conv = nn.Conv2d(in_channels, out_channels,
                              kernel_size=6, stride=stride, padding=padding, bias=False)

        # BUGFIX: nn.LayerNorm(out_channels) normalizes the LAST dim of an
        # (N, C, H, W) tensor, i.e. width — it raised a shape error whenever
        # W != out_channels. InstanceNorm2d normalizes per-sample, per-channel
        # over (H, W), which is the standard choice for conv discriminators.
        # Attribute name "In" kept so any existing state_dict keys still match.
        self.In = nn.InstanceNorm2d(
            out_channels) if use_norm else nn.Identity()
        # NOTE(review): slope 0.02 is unusual — DCGAN convention is 0.2.
        # Kept as-is to preserve behavior; confirm whether 0.02 is intentional.
        self.act = nn.LeakyReLU(0.02) if use_activation else nn.Identity()

    def forward(self, x):
        return self.act(self.In(self.conv(x)))


class Generator(nn.Module):
    """DCGAN-style generator: a stack of upsampling stages.

    Stages are built coarsest-first (``reversed(range(stages))``). The first
    stage maps the 1-channel latent map up; each later stage halves the
    channel count and doubles the resolution; the final stage emits a
    3-channel image through Tanh (via GeneratorLayer's activation choice).
    """

    def __init__(self, stages: int = p.STAGES, *args, **kwargs) -> None:
        """Build ``stages`` upsampling blocks.

        Args:
            stages: number of upsampling stages (defaults to p.STAGES).
        """
        super().__init__(*args, **kwargs)
        self.layers = nn.ModuleList()

        for stage in reversed(range(stages)):
            is_first = stage == stages - 1  # coarsest stage, fed the latent map
            in_channels = 1 if is_first else (2 ** (stage + 1)) * p.FILTERS_BASELINE
            out_channels = 3 if stage == 0 else (2 ** stage) * p.FILTERS_BASELINE
            use_norm = stage != 0          # no norm on the output stage
            activation = 'tanh' if stage == 0 else 'relu'

            if is_first:
                # Stem for the latent map. Spatial math: the first transposed
                # conv (k=4, s=1, p=1) grows H to H+1; the second (k=4, s=2,
                # p=2) maps H+1 to 2H, i.e. the stem doubles resolution overall.
                block = nn.Sequential(
                    nn.ConvTranspose2d(in_channels, out_channels,
                                       kernel_size=4, stride=1, padding=1),
                    nn.ConvTranspose2d(out_channels, out_channels,
                                       kernel_size=4, stride=2, padding=2),
                    nn.BatchNorm2d(out_channels) if use_norm else nn.Identity(),
                    nn.ReLU())
            else:
                block = GeneratorLayer(
                    in_channels, out_channels, use_norm=use_norm, activation=activation)
            self.layers.append(block)

    def forward(self, x):
        """Run the input through every stage in order."""
        out = x
        for layer in self.layers:
            out = layer(out)
        return out


class Discriminator(nn.Module):
    """DCGAN-style discriminator: downsampling conv stages + linear head.

    Each stage doubles the channel count and halves the spatial size; a
    Flatten + Linear head maps the final feature map to a single logit.
    """

    def __init__(self, stages: int = p.STAGES, *args, **kwargs) -> None:
        """Build ``stages`` downsampling blocks and the classifier head.

        Args:
            stages: number of downsampling stages (defaults to p.STAGES).
        """
        super().__init__(*args, **kwargs)
        self.layers = nn.ModuleList()

        # Fallback so the head is still well-defined when stages == 0
        # (the original code raised NameError on out_channels in that case).
        out_channels = 3
        for stage in range(stages):
            in_channels = 3 if stage == 0 else (2 ** stage) * p.FILTERS_BASELINE
            out_channels = (2 ** (stage + 1)) * p.FILTERS_BASELINE
            use_norm = stage != 0  # no norm on the raw-image stage
            self.layers.append(DiscriminatorLayer(
                in_channels, out_channels, use_norm=use_norm, use_activation=True))

        # Every stage halves H and W, so the final map is
        # IMAGE_CROP_SIZE / 2**stages on each side.
        in_features = int(out_channels * (p.IMAGE_CROP_SIZE / (2 ** stages)) ** 2)
        self.mlp = nn.Sequential(nn.Flatten(), nn.Linear(in_features, 1))

    def forward(self, x):
        """Run the input through every stage, then the linear head."""
        out = x
        for layer in self.layers:
            out = layer(out)
        return self.mlp(out)


if __name__ == "__main__":
    # Smoke test: push a dummy 1-channel latent map through the generator,
    # print the module tree, and check the output shape by eye.
    x = torch.randn([4, 1, 16, 16])
    g = Generator()
    for module in g.modules():
        print(module)
    y = g(x)
    print(y.shape)