import torch
import torch.nn as nn
from basicsr.utils.registry import ARCH_REGISTRY

class RDB_Conv(nn.Module):
    """One dense-layer unit inside a residual dense block.

    Runs a conv + ReLU over the input and concatenates the new feature
    maps after the input channels (dense connectivity), so the output
    carries ``inChannels + growRate`` channels at the same spatial size.
    """

    def __init__(self, inChannels, growRate, kSize=3):
        super(RDB_Conv, self).__init__()
        # (kSize - 1) // 2 padding keeps H and W unchanged for odd kernels.
        self.conv = nn.Sequential(
            nn.Conv2d(inChannels, growRate, kSize,
                      padding=(kSize - 1) // 2, stride=1),
            nn.ReLU(),
        )

    def forward(self, x):
        # Dense connection: input channels first, new growth maps appended.
        return torch.cat((x, self.conv(x)), 1)

class RDB(nn.Module):
    """Residual Dense Block.

    Stacks ``nConvLayers`` densely connected conv units, fuses the
    accumulated channels back down to ``growRate0`` with a 1x1 conv
    (local feature fusion), then adds the block input as a residual.
    Input and output both have ``growRate0`` channels.
    """

    def __init__(self, growRate0, growRate, nConvLayers, kSize=3):
        super(RDB, self).__init__()
        # Unit c sees the original growRate0 channels plus c growth maps.
        self.convs = nn.Sequential(
            *[RDB_Conv(growRate0 + c * growRate, growRate)
              for c in range(nConvLayers)]
        )

        # Local Feature Fusion: 1x1 conv squeezing back to growRate0 channels.
        self.LFF = nn.Conv2d(growRate0 + nConvLayers * growRate, growRate0,
                             1, padding=0, stride=1)

    def forward(self, x):
        fused = self.LFF(self.convs(x))
        # Local residual learning around the whole dense stack.
        return fused + x

@ARCH_REGISTRY.register()
class RDN(nn.Module):
    """Residual Dense Network for single-image super-resolution.

    Pipeline: shallow feature extraction (two 3x3 convs) -> a chain of
    residual dense blocks -> global feature fusion over the concatenated
    block outputs -> global residual connection back to the first shallow
    feature -> PixelShuffle upsampler producing the HR image.

    Args:
        in_channels: channels of the low-resolution input.
        out_channels: channels of the super-resolved output.
        num_features: base feature width G0 (also used as the RDB growth
            rate in this configuration).
        num_blocks: number of residual dense blocks (D).
        num_layers: conv units per residual dense block.
        upscale_factor: spatial upscaling ratio; must be 2, 3 or 4.

    Raises:
        ValueError: if ``upscale_factor`` is not 2, 3 or 4.
    """

    def __init__(self, in_channels, out_channels, num_features, num_blocks,
                 num_layers, upscale_factor):
        super(RDN, self).__init__()
        kernel = 3
        pad = (kernel - 1) // 2
        feat = num_features  # G0 == G (growth rate) in this configuration
        self.D = num_blocks  # number of RDBs, reused in forward()

        # Shallow feature extraction network.
        self.SFENet1 = nn.Conv2d(in_channels, feat, kernel, padding=pad, stride=1)
        self.SFENet2 = nn.Conv2d(feat, feat, kernel, padding=pad, stride=1)

        # Residual dense blocks.
        self.RDBs = nn.ModuleList(
            RDB(growRate0=feat, growRate=feat, nConvLayers=num_layers)
            for _ in range(num_blocks)
        )

        # Global Feature Fusion: 1x1 conv over all concatenated RDB
        # outputs, followed by a 3x3 conv.
        self.GFF = nn.Sequential(
            nn.Conv2d(num_blocks * feat, feat, 1, padding=0, stride=1),
            nn.Conv2d(feat, feat, kernel, padding=pad, stride=1),
        )

        # Upsampler: a single PixelShuffle stage for x2/x3, two cascaded
        # x2 stages for x4.
        r = upscale_factor
        if r in (2, 3):
            self.UPNet = nn.Sequential(
                nn.Conv2d(feat, feat * r * r, kernel, padding=pad, stride=1),
                nn.PixelShuffle(r),
                nn.Conv2d(feat, out_channels, kernel, padding=pad, stride=1),
            )
        elif r == 4:
            self.UPNet = nn.Sequential(
                nn.Conv2d(feat, feat * 2 * 2, kernel, padding=pad, stride=1),
                nn.PixelShuffle(2),
                nn.Conv2d(feat, feat * 2 * 2, kernel, padding=pad, stride=1),
                nn.PixelShuffle(2),
                nn.Conv2d(feat, out_channels, kernel, padding=pad, stride=1),
            )
        else:
            raise ValueError("scale must be 2 or 3 or 4.")

    def forward(self, x):
        shallow = self.SFENet1(x)
        x = self.SFENet2(shallow)

        # Keep every RDB output so they can all be fused globally.
        block_outs = []
        for block in self.RDBs:
            x = block(x)
            block_outs.append(x)

        x = self.GFF(torch.cat(block_outs, 1))
        # Global residual learning back to the first shallow feature.
        x = x + shallow

        return self.UPNet(x)