from torch import nn as nn
from torch.nn import functional as F

from basicsr.archs.arch_util import default_init_weights
from basicsr.utils.registry import ARCH_REGISTRY


@ARCH_REGISTRY.register()
class FSRCNNArch(nn.Module):
    """FSRCNN architecture.

    FSRCNN: Fast Super-Resolution Convolutional Neural Network
    Paper: Accelerating the Super-Resolution Convolutional Neural Network
    https://arxiv.org/abs/1608.00367

    Pipeline: feature extraction -> shrinking -> non-linear mapping ->
    expanding -> transposed-conv upsampling.

    Args:
        num_in_ch (int): Channel number of inputs. Default: 3.
        num_out_ch (int): Channel number of outputs. Default: 3.
        num_feat (int): Channel number of intermediate features. Default: 56.
        num_fast_feat (int): Channel number of fast (shrunk) features. Default: 12.
        upscale (int): Upsampling factor. Default: 2.
        num_block (int): Number of mapping layers. Default: 4.
    """

    def __init__(self, num_in_ch=3, num_out_ch=3, num_feat=56, num_fast_feat=12, upscale=2, num_block=4):
        super().__init__()

        # Feature extraction: 5x5 conv, padding=2 keeps spatial size.
        self.feature_extraction = nn.Conv2d(num_in_ch, num_feat, kernel_size=5, stride=1, padding=2)
        self.prelu1 = nn.PReLU(num_parameters=num_feat)

        # Shrinking: 1x1 conv reduces channels so the mapping stage is cheap.
        self.shrink = nn.Conv2d(num_feat, num_fast_feat, kernel_size=1, stride=1, padding=0)
        self.prelu2 = nn.PReLU(num_parameters=num_fast_feat)

        # Mapping: num_block pairs of (3x3 conv + PReLU) in the low-dim space.
        mapping_layers = []
        for _ in range(num_block):
            mapping_layers.append(nn.Conv2d(num_fast_feat, num_fast_feat, kernel_size=3, stride=1, padding=1))
            mapping_layers.append(nn.PReLU(num_parameters=num_fast_feat))
        self.mapping = nn.Sequential(*mapping_layers)

        # Expanding: 1x1 conv restores the channel count before upsampling.
        self.expand = nn.Conv2d(num_fast_feat, num_feat, kernel_size=1, stride=1, padding=0)
        self.prelu3 = nn.PReLU(num_parameters=num_feat)

        # Deconvolution for upsampling. With stride=upscale, padding=4
        # (= kernel_size // 2) and output_padding=upscale-1, the output is
        # exactly `upscale` times the input spatial size.
        self.deconv = nn.ConvTranspose2d(
            num_feat, num_out_ch, kernel_size=9, stride=upscale, padding=4, output_padding=upscale - 1)

        # Initialize weights
        self._initialize_weights()

    def _initialize_weights(self):
        """Initialize conv weights.

        The first (feature extraction) and last (deconv) layers use a SMALLER
        scale (0.1) than the inner layers (1.0) to temper the magnitudes at
        the network boundaries during early training.
        """
        # NOTE(review): a previous comment described 0.1 as the "larger"
        # scale; 0.1 < 1.0, so it is in fact the smaller one.
        default_init_weights([self.feature_extraction, self.deconv], scale=0.1)
        # Inner layers use the default scale. The mapping convs are collected
        # with one pass instead of one default_init_weights call per layer;
        # PReLU modules are skipped exactly as before.
        inner_convs = [layer for layer in self.mapping if isinstance(layer, nn.Conv2d)]
        default_init_weights([self.shrink, self.expand] + inner_convs, scale=1.0)

    def forward(self, x):
        """Forward pass.

        Args:
            x (Tensor): Input of shape (n, num_in_ch, h, w).

        Returns:
            Tensor: Output of shape (n, num_out_ch, h * upscale, w * upscale).
        """
        # Feature extraction
        x = self.prelu1(self.feature_extraction(x))
        # Shrinking
        x = self.prelu2(self.shrink(x))
        # Mapping
        x = self.mapping(x)
        # Expanding
        x = self.prelu3(self.expand(x))
        # Deconvolution for upsampling
        x = self.deconv(x)
        return x