import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from .bricks import BuildNormalization, BuildActivation

class BasicBlock(nn.Module):
    """Residual basic block: two 3x3 convs with an identity shortcut.

    The first conv carries `stride` and `dilation`; the second is always
    stride-1 with padding 1. When `downsample` is given it projects the
    input so the residual addition matches the main path's shape.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, norm_cfg=None, act_cfg=None):
        super(BasicBlock, self).__init__()
        # Main path: conv-norm-act, conv-norm.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=dilation, dilation=dilation, bias=False)
        self.bn1 = BuildNormalization(placeholder=planes, norm_cfg=norm_cfg)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = BuildNormalization(placeholder=planes, norm_cfg=norm_cfg)
        self.relu = BuildActivation(act_cfg)
        # Optional projection for the shortcut path.
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation

    def forward(self, x):
        """Run the block; returns activation(main(x) + shortcut(x))."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)

class Bottleneck(nn.Module):
    """Residual bottleneck block: 1x1 reduce, 3x3, 1x1 expand.

    The middle 3x3 conv carries `stride` and `dilation`; the final 1x1
    conv widens the output by `expansion` (2 here, unlike torchvision's 4).
    A shortcut (optionally projected by `downsample`) is added before the
    last activation.
    """
    expansion = 2

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, norm_cfg=None, act_cfg=None):
        super(Bottleneck, self).__init__()
        # 1x1 reduction.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn1 = BuildNormalization(placeholder=planes, norm_cfg=norm_cfg)
        # 3x3 spatial conv carrying stride/dilation.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=dilation, dilation=dilation, bias=False)
        self.bn2 = BuildNormalization(placeholder=planes, norm_cfg=norm_cfg)
        # 1x1 expansion.
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn3 = BuildNormalization(placeholder=planes * self.expansion, norm_cfg=norm_cfg)
        self.relu = BuildActivation(act_cfg)
        # Optional projection for the shortcut path.
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation

    def forward(self, x):
        """Run the block; returns activation(main(x) + shortcut(x))."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)

class DAPPM(nn.Module):
    """Deep Aggregation Pyramid Pooling Module.

    Pools the input at several scales (plus a global pool), upsamples each
    pooled map back to the input resolution, refines it with a 3x3 conv
    while aggregating the previous branch, concatenates the five branches,
    compresses them to `out_channels`, and adds a 1x1 shortcut projection
    of the raw input.
    """

    def __init__(self, in_channels, inter_channels, out_channels):
        super(DAPPM, self).__init__()

        def bn_act_conv(cin, cout, kernel, pad=0):
            # Pre-activation ordering: SyncBN -> ReLU -> Conv.
            # NOTE: module order matches the original so state_dict keys
            # (Sequential indices) are unchanged.
            return [
                nn.SyncBatchNorm(cin),
                nn.ReLU(),
                nn.Conv2d(cin, cout, kernel_size=kernel, padding=pad),
            ]

        # Pooled branches with growing receptive fields.
        self.scale1 = nn.Sequential(
            nn.AvgPool2d(kernel_size=5, stride=2, padding=2, count_include_pad=True),
            *bn_act_conv(in_channels, inter_channels, 1))
        self.scale2 = nn.Sequential(
            nn.AvgPool2d(kernel_size=9, stride=4, padding=4, count_include_pad=True),
            *bn_act_conv(in_channels, inter_channels, 1))
        self.scale3 = nn.Sequential(
            nn.AvgPool2d(kernel_size=17, stride=8, padding=8, count_include_pad=True),
            *bn_act_conv(in_channels, inter_channels, 1))
        # Global-context branch.
        self.scale4 = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, 1)),
            *bn_act_conv(in_channels, inter_channels, 1))
        # Full-resolution branch (no pooling).
        self.scale0 = nn.Sequential(*bn_act_conv(in_channels, inter_channels, 1))
        # One 3x3 refinement per pooled branch.
        self.process1 = nn.Sequential(*bn_act_conv(inter_channels, inter_channels, 3, pad=1))
        self.process2 = nn.Sequential(*bn_act_conv(inter_channels, inter_channels, 3, pad=1))
        self.process3 = nn.Sequential(*bn_act_conv(inter_channels, inter_channels, 3, pad=1))
        self.process4 = nn.Sequential(*bn_act_conv(inter_channels, inter_channels, 3, pad=1))
        # Fuse the five concatenated branches down to out_channels.
        self.compression = nn.Sequential(*bn_act_conv(inter_channels * 5, out_channels, 1))
        # Residual projection of the raw input.
        self.shortcut = nn.Sequential(*bn_act_conv(in_channels, out_channels, 1))

    def forward(self, x):
        """Return fused multi-scale features at the input's spatial size."""
        size = x.shape[2:]
        feats = [self.scale0(x)]
        # Each pooled branch is upsampled, summed with the previous branch's
        # output, and refined by its matching process conv.
        branch_pairs = (
            (self.scale1, self.process1),
            (self.scale2, self.process2),
            (self.scale3, self.process3),
            (self.scale4, self.process4),
        )
        for scale, process in branch_pairs:
            upsampled = F.interpolate(scale(x), size=size, mode='bilinear')
            feats.append(process(upsampled + feats[-1]))

        return self.compression(torch.cat(feats, dim=1)) + self.shortcut(x)

class DDRNet(nn.Module):
    """DDRNet backbone.

    This backbone is the implementation of `Deep Dual-resolution Networks for
    Real-time and Accurate Semantic Segmentation of Road Scenes
    <http://arxiv.org/abs/2101.06085>`_.
    Modified from https://github.com/ydhongHIT/DDRNet.

    A high-resolution (spatial) branch stays at 1/8 resolution while a
    low-resolution (context) branch keeps downsampling; the two exchange
    information twice (bilateral fusion) before the context branch is pooled
    by DAPPM and added back to the spatial branch.

    Args:
        structure_type (str): Variant tag; currently unused. Default: ''.
        in_channels (int): Number of input image channels. Default: 3.
        channels (int): The base channels of DDRNet. Default: 64.
        ppm_channels (int): The intermediate channels of the DAPPM module.
            Default: 128.
        align_corners (bool): align_corners argument of F.interpolate.
            Default: False.
        norm_cfg (dict, optional): Config dict to build norm layers.
            None means the default {'type': 'SyncBatchNorm'}.
        act_cfg (dict, optional): Config dict for activation layers.
            None means the default {'type': 'ReLU', 'inplace': True}.
    """

    def __init__(self,
                 structure_type="",
                 in_channels=3,
                 channels=64,
                 ppm_channels=128,
                 align_corners=False,
                 norm_cfg=None,
                 act_cfg=None,
                 ):
        super(DDRNet, self).__init__()
        # Resolve config fallbacks here instead of using mutable dict
        # default arguments in the signature (shared-state pitfall).
        if norm_cfg is None:
            norm_cfg = {'type': 'SyncBatchNorm'}
        if act_cfg is None:
            act_cfg = {'type': 'ReLU', 'inplace': True}

        self.in_channels = in_channels
        self.ppm_channels = ppm_channels

        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.align_corners = align_corners

        # stage 0-2: stem downsamples by 8 overall and widens to channels * 2.
        self.stem = self._make_stem_layer(in_channels, channels, norm_cfg, act_cfg, num_blocks=2)
        self.relu = nn.ReLU()

        # low resolution (context) branch: halves resolution at each stage.
        self.context_branch_layers = nn.ModuleList()
        for i in range(3):
            self.context_branch_layers.append(
                self._make_layer(
                    block=BasicBlock if i < 2 else Bottleneck,
                    inplanes=channels * 2**(i + 1),
                    planes=channels * 8 if i > 0 else channels * 4,
                    num_blocks=2 if i < 2 else 1,
                    stride=2))

        # bilateral fusion: 1x1 "compression" projects context features for
        # the spatial branch; strided 3x3 "down" convs project spatial
        # features for the context branch.
        self.compression_1 = nn.Sequential(
            nn.Conv2d(channels*4, channels*2, kernel_size=1, stride=1, padding=0, bias=False),
            BuildNormalization(placeholder=channels*2, norm_cfg=norm_cfg)
        )
        self.down_1 = nn.Sequential(
            nn.Conv2d(channels*2, channels*4, kernel_size=3, stride=2, padding=1, bias=False),
            BuildNormalization(placeholder=channels*4, norm_cfg=norm_cfg)
        )
        self.compression_2 = nn.Sequential(
            nn.Conv2d(channels*8, channels*2, kernel_size=1, stride=1, padding=0, bias=False),
            BuildNormalization(placeholder=channels*2, norm_cfg=norm_cfg)
        )
        # Second fusion bridges a 4x resolution gap, hence two strided convs.
        self.down_2 = nn.Sequential(
            nn.Conv2d(channels*2, channels*4, kernel_size=3, stride=2, padding=1, bias=False),
            BuildNormalization(placeholder=channels*4, norm_cfg=norm_cfg),
            nn.Conv2d(channels*4, channels*8, kernel_size=3, stride=2, padding=1, bias=False),
            BuildNormalization(placeholder=channels*8, norm_cfg=norm_cfg),
        )

        # high resolution (spatial) branch: constant width, stride 1.
        self.spatial_branch_layers = nn.ModuleList()
        for i in range(3):
            self.spatial_branch_layers.append(
                self._make_layer(
                    block=BasicBlock if i < 2 else Bottleneck,
                    inplanes=channels * 2,
                    planes=channels * 2,
                    num_blocks=2 if i < 2 else 1,
                ))

        # Multi-scale pooling head on the deepest context features.
        self.spp = DAPPM(
            channels * 16, ppm_channels, channels * 4)

    def _make_stem_layer(self, in_channels, channels, norm_cfg, act_cfg, num_blocks):
        """Build the stem: two stride-2 conv blocks, then two residual
        stages (the second one stride-2, widening to channels * 2)."""
        layers = [
            nn.Sequential(
                nn.Conv2d(in_channels, channels, kernel_size=3, stride=2, padding=1),
                BuildNormalization(placeholder=channels, norm_cfg=norm_cfg),
                BuildActivation(act_cfg),
            ),
            nn.Sequential(
                nn.Conv2d(channels, channels, kernel_size=3, stride=2, padding=1),
                BuildNormalization(placeholder=channels, norm_cfg=norm_cfg),
                BuildActivation(act_cfg),
            )
        ]

        layers.extend([
            self._make_layer(BasicBlock, channels, channels, num_blocks),
            nn.ReLU(),
            self._make_layer(
                BasicBlock, channels, channels * 2, num_blocks, stride=2),
            nn.ReLU(),
        ])

        return nn.Sequential(*layers)

    def _make_layer(self, block, inplanes, planes, num_blocks, stride=1):
        """Stack `num_blocks` residual `block`s; the first block carries
        `stride` and a projection shortcut when the shape changes."""
        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            # Projection shortcut: match spatial size and channel count.
            downsample = nn.Sequential(
                nn.Conv2d(
                    inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=False),
                BuildNormalization(placeholder=planes * block.expansion, norm_cfg=self.norm_cfg))

        layers = [
            block(
                inplanes=inplanes,
                planes=planes,
                stride=stride,
                downsample=downsample,
                # Fix: the first block previously omitted these and fell back
                # to norm_cfg=None / act_cfg=None, inconsistent with the
                # configs passed to every subsequent block below.
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
        ]
        inplanes = planes * block.expansion
        for i in range(1, num_blocks):
            layers.append(
                block(
                    inplanes=inplanes,
                    planes=planes,
                    stride=1,
                    norm_cfg=self.norm_cfg,
                    # Last block keeps act_cfg=None — presumably so the stage
                    # output is not re-activated before fusion; preserved as-is.
                    act_cfg=None if i == num_blocks - 1 else self.act_cfg))

        return nn.Sequential(*layers)

    def forward(self, x):
        """Forward function.

        Returns:
            list[Tensor]: [fused spatial+context features at 1/8 resolution,
            the intermediate spatial features saved after the first fusion].
        """
        # Both branches operate at (or are resized to) 1/8 input resolution.
        out_size = (x.shape[-2] // 8, x.shape[-1] // 8)

        # stage 0-2
        x = self.stem(x)

        # stage3: first bilateral fusion.
        x_c = self.context_branch_layers[0](x)
        x_s = self.spatial_branch_layers[0](x)
        comp_c = self.compression_1(self.relu(x_c))
        x_c += self.down_1(self.relu(x_s))
        x_s += F.interpolate(
            comp_c,
            size=out_size,
            mode='bilinear',
            align_corners=self.align_corners)

        # Keep a copy for the auxiliary output before stage4 overwrites x_s.
        temp_context = x_s.clone()

        # stage4: second bilateral fusion.
        x_c = self.context_branch_layers[1](self.relu(x_c))
        x_s = self.spatial_branch_layers[1](self.relu(x_s))
        comp_c = self.compression_2(self.relu(x_c))
        x_c += self.down_2(self.relu(x_s))
        x_s += F.interpolate(
            comp_c,
            size=out_size,
            mode='bilinear',
            align_corners=self.align_corners)

        # stage5: deepest blocks, DAPPM pooling, and final fusion.
        x_s = self.spatial_branch_layers[2](self.relu(x_s))
        x_c = self.context_branch_layers[2](self.relu(x_c))
        x_c = self.spp(x_c)
        x_c = F.interpolate(
            x_c,
            size=out_size,
            mode='bilinear',
            align_corners=self.align_corners)
        outs = []
        outs.append(x_s + x_c)
        outs.append(temp_context)
        return outs
