from torch import nn as nn
from torch.nn import functional as F

from basicsr.archs.arch_util import default_init_weights
from basicsr.utils.registry import ARCH_REGISTRY


@ARCH_REGISTRY.register()
class SRCNNArch(nn.Module):
    """SRCNN architecture.

    Pre-upsampling SRCNN: the input is first interpolated to the target
    resolution with bicubic upsampling, then refined by three conv stages
    (feature extraction -> non-linear mapping -> reconstruction). All conv
    layers preserve spatial size, so the output resolution is exactly
    ``upscale`` times the input resolution.

    Args:
        num_in_ch (int): Channel number of inputs. Default: 3.
        num_out_ch (int): Channel number of outputs. Default: 3.
        num_feat (int): Channel number of intermediate features. Default: 64.
        upscale (int): Upsampling factor. Default: 2.
    """

    def __init__(self, num_in_ch=3, num_out_ch=3, num_feat=64, upscale=2):
        super().__init__()
        # Feature extraction: 9x9 conv (padding 4 keeps spatial size) + ReLU.
        self.features = nn.Sequential(
            nn.Conv2d(num_in_ch, num_feat, 9, 1, 4),
            nn.ReLU(True),
        )
        # Non-linear mapping: 1x1 conv halves the channel count.
        self.map = nn.Sequential(
            nn.Conv2d(num_feat, num_feat // 2, 1, 1, 0),
            nn.ReLU(True),
        )
        # Reconstruction: 5x5 conv (padding 2) back to the output channels.
        self.reconstruction = nn.Conv2d(num_feat // 2, num_out_ch, 5, 1, 2)
        # Bicubic interpolation to the target resolution; applied first in forward.
        self.upsample = nn.Upsample(scale_factor=upscale, mode='bicubic', align_corners=False)
        # Down-scaled init (scale=0.1) stabilizes early training of the convs.
        default_init_weights([self.features[0], self.map[0], self.reconstruction], scale=0.1)

    def forward(self, x):
        """Run the SRCNN forward pass.

        Args:
            x (Tensor): Input of shape (n, num_in_ch, h, w).

        Returns:
            Tensor: Output of shape (n, num_out_ch, h * upscale, w * upscale).
        """
        # Upsample input to target resolution before any convolution.
        x = self.upsample(x)
        # Feature extraction.
        x = self.features(x)
        # Non-linear mapping.
        x = self.map(x)
        # Reconstruction.
        x = self.reconstruction(x)
        return x
