import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

from basicsr.utils.registry import ARCH_REGISTRY

# Xavier Initialization
def xavier(net):
    """Apply Xavier (Glorot) uniform initialization to all conv layers of ``net``.

    Every ``nn.Conv2d`` and ``nn.ConvTranspose2d`` module gets its weight drawn
    from a Xavier-uniform distribution and its bias (when present) zeroed.
    All other module types are left untouched.

    Args:
        net (nn.Module): Network whose submodules are initialized in place.
    """
    for m in net.modules():
        # Idiomatic single isinstance call with a type tuple instead of an `or` chain.
        if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
            nn.init.xavier_uniform_(m.weight)
            if m.bias is not None:
                m.bias.data.zero_()


class SingleLayer(nn.Module):
    """One dense layer: 3x3 conv + ReLU whose output is concatenated with its input.

    Output channel count is ``inChannels + growthRate``.
    """

    def __init__(self, inChannels, growthRate):
        super(SingleLayer, self).__init__()
        self.conv = nn.Conv2d(inChannels, growthRate, kernel_size=3, padding=1, bias=True)

    def forward(self, x):
        features = F.relu(self.conv(x))
        # Dense connectivity: the new feature maps are stacked onto the incoming ones.
        return torch.cat((x, features), 1)


class SingleBlock(nn.Module):
    """A dense block built from ``nDenselayer`` stacked SingleLayers.

    Each layer contributes ``growthRate`` extra channels, so the block maps
    ``inChannels`` to ``inChannels + growthRate * nDenselayer`` channels.
    """

    def __init__(self, inChannels, growthRate, nDenselayer):
        super(SingleBlock, self).__init__()
        self.block = self._make_dense(inChannels, growthRate, nDenselayer)

    def _make_dense(self, inChannels, growthRate, nDenselayer):
        # Track the channel count as it grows by growthRate after every layer.
        channels = inChannels
        layers = []
        for _ in range(int(nDenselayer)):
            layers.append(SingleLayer(channels, growthRate))
            channels += growthRate
        return nn.Sequential(*layers)

    def forward(self, x):
        return self.block(x)


@ARCH_REGISTRY.register()
class SRDenseNetArch(nn.Module):
    """SRDenseNet architecture.

    Args:
        num_in_ch (int): Channel number of inputs. Default: 3.
        num_out_ch (int): Channel number of outputs. Default: 3.
        growth_rate (int): Growth rate in dense blocks. Default: 16.
        num_blocks (int): Number of dense blocks. Default: 8.
        num_layers (int): Number of layers in each dense block. Default: 8.
        upscale (int): Upsampling factor. Must be a positive power of two,
            since each transposed convolution doubles the resolution.
            Default: 4.

    Raises:
        ValueError: If ``upscale`` is not a positive power of two.
    """

    def __init__(self, num_in_ch=3, num_out_ch=3, growth_rate=16, num_blocks=8, num_layers=8, upscale=4):
        super(SRDenseNetArch, self).__init__()

        # Each ConvTranspose2d below upsamples by exactly 2x, so only
        # power-of-two factors are realizable. The previous loop silently
        # rounded up (e.g. upscale=3 built a 4x network); fail loudly instead.
        if upscale < 1 or (upscale & (upscale - 1)) != 0:
            raise ValueError(f'upscale must be a positive power of 2, but got {upscale}')

        # Shallow feature extraction.
        self.conv1 = nn.Conv2d(num_in_ch, growth_rate, kernel_size=3, padding=1, bias=True)

        in_channels = growth_rate

        # Dense blocks; every block adds growth_rate * num_layers channels
        # to the running concatenation.
        self.denseblock = self._make_block(in_channels, growth_rate, num_layers, num_blocks)
        in_channels += growth_rate * num_layers * num_blocks

        # 1x1 conv compressing the concatenated features down to 128 channels.
        self.Bottleneck = nn.Conv2d(in_channels=in_channels, out_channels=128, kernel_size=1, padding=0, bias=True)

        # log2(upscale) transposed convolutions, each doubling the resolution.
        # bit_length()-1 equals log2 exactly because upscale is a power of two.
        num_transpose_layers = upscale.bit_length() - 1
        self.transpose_layers = nn.ModuleList()
        for _ in range(num_transpose_layers):
            self.transpose_layers.append(nn.ConvTranspose2d(
                in_channels=128, out_channels=128, kernel_size=4, stride=2, padding=1, bias=True))

        # Reconstruction layer producing the output image.
        self.conv2 = nn.Conv2d(in_channels=128, out_channels=num_out_ch, kernel_size=3, padding=1, bias=True)

        # Xavier-initialize all conv / transposed-conv weights.
        xavier(self)

    def _make_block(self, inChannels, growthRate, nDenselayer, nBlock):
        """Stack ``nBlock`` SingleBlocks, tracking the growing channel count."""
        blocks = []
        for _ in range(int(nBlock)):
            blocks.append(SingleBlock(inChannels, growthRate, nDenselayer))
            inChannels += growthRate * nDenselayer
        return nn.Sequential(*blocks)

    def forward(self, x):
        """Map a low-resolution batch (N, num_in_ch, H, W) to (N, num_out_ch, H*upscale, W*upscale)."""
        out = F.relu(self.conv1(x))
        out = self.denseblock(out)
        out = self.Bottleneck(out)

        # Progressive 2x upsampling.
        for layer in self.transpose_layers:
            out = layer(out)

        return self.conv2(out)