from typing import Dict
import torch
import torch.nn as nn
import torch.nn.functional as F

from torchvision.models import resnet50, ResNet50_Weights
from torchvision.models.resnet import ResNet, Bottleneck
from torchvision.models._utils import IntermediateLayerGetter


class ResNet50Backbone(nn.Module):
    """ResNet-50 feature extractor for DeepLabV3+.

    Taps two intermediate activations of torchvision's ResNet-50:
    ``layer3`` (1024 channels, stride 16) as the high-level features and
    ``layer1`` (256 channels, stride 4) as the low-level features.
    """

    def __init__(self, pretrained=True):
        super(ResNet50Backbone, self).__init__()

        resnet = resnet50(
            weights=ResNet50_Weights.IMAGENET1K_V1 if pretrained else None
        )
        # Collect the two named taps in a single forward pass.
        self.backbone = IntermediateLayerGetter(
            resnet,
            return_layers={'layer3': 'out', 'layer1': 'low_level'},
        )

    def forward(self, x):
        features = self.backbone(x)
        return features['out'], features['low_level']

class ResNetBackbone(ResNet):
    """ResNet-101 backbone with atrous (dilated) convolutions for DeepLab.

    Builds a torchvision ResNet-101 (Bottleneck, [3, 4, 23, 3]) and converts
    its deeper stages to dilated convolutions so the feature map keeps an
    output stride of 8 or 16 instead of 32.

    Args:
        pretrained: load ImageNet weights via torch.hub when True.
        output_stride: input-to-feature-map resolution ratio, 8 or 16.

    Raises:
        ValueError: if output_stride is neither 8 nor 16.
    """

    def __init__(self, pretrained=True, output_stride=16):
        super().__init__(Bottleneck, [3, 4, 23, 3])

        if pretrained:
            pretrained_model = torch.hub.load('pytorch/vision:v0.10.0', 'resnet101', pretrained=True)
            # strict=False tolerates classifier-head mismatches; only the
            # backbone weights matter here.
            self.load_state_dict(pretrained_model.state_dict(), strict=False)

        self._apply_atrous_conv(output_stride)

    def _apply_atrous_conv(self, output_stride):
        """Pick per-layer dilation rates for the requested output stride."""
        if output_stride == 16:
            dilation_params = {
                'layer4': [2, 2, 2]
            }
        elif output_stride == 8:
            dilation_params = {
                'layer3': [2, 2, 2, 2],
                'layer4': [4, 4, 4]
            }
        else:
            raise ValueError("Invalid output_stride. Supported values: 8 or 16")

        self._modify_layer(self.layer3, dilation_params.get('layer3', [1] * 4))
        self._modify_layer(self.layer4, dilation_params.get('layer4', [1] * 3))

    def _modify_layer(self, layer, dilation_rates):
        """Apply a dilation rate to every Bottleneck block of `layer`.

        A dilated block loses its spatial stride; the downsample shortcut's
        stride is removed as well so the residual shapes still match.
        """
        rates = list(dilation_rates)
        # layer3 of ResNet-101 has 23 blocks; repeat the last rate so every
        # block is covered instead of zip() silently truncating after 4.
        if len(rates) < len(layer):
            rates.extend([rates[-1]] * (len(layer) - len(rates)))

        for block, dilation in zip(layer, rates):
            # Bottleneck has exactly one 3x3 conv (conv2); it is a single
            # nn.Conv2d, not an iterable.
            conv = block.conv2
            conv.dilation = (dilation, dilation)
            conv.padding = (dilation, dilation)
            if dilation > 1:
                conv.stride = (1, 1)
                # Keep the shortcut's stride in sync with the main path,
                # otherwise the residual addition fails on shape.
                if block.downsample is not None:
                    block.downsample[0].stride = (1, 1)

    def forward_features(self, x):
        # NOTE(review): a fresh IntermediateLayerGetter is constructed per
        # call; functionally fine, but could be cached if this is hot.
        return_layers = {'layer4': 'out', 'layer1': 'low_level'}
        backbone = IntermediateLayerGetter(self, return_layers=return_layers)
        return backbone(x)
    

class DepthwiseConv2d(nn.Module):
    """Depthwise convolution: one independent filter per input channel.

    Implemented as a grouped Conv2d with groups == in_channels, so the
    channel count is preserved and channels never mix.
    """

    def __init__(self, in_channels, kernel_size, stride=1, padding=0, dilation=1, bias=False):
        super(DepthwiseConv2d, self).__init__()
        self.depthwise = nn.Conv2d(
            in_channels,
            in_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=in_channels,
            bias=bias,
        )

    def forward(self, x):
        out = self.depthwise(x)
        return out


class SeparableConv2d(nn.Module):
    """Depthwise-separable convolution: a depthwise conv followed by a 1x1
    pointwise conv that mixes channels.

    Args:
        in_channels: input channel count.
        out_channels: output channel count (set by the pointwise conv).
        kernel_size: depthwise kernel size.
        stride: depthwise stride.
        padding: explicit padding; when None, "same" padding is derived from
            kernel_size AND dilation so spatial size is preserved at stride 1.
        dilation: depthwise dilation rate.
        bias: add bias terms to both convolutions.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=None, dilation=1, bias=False):
        super(SeparableConv2d, self).__init__()

        if padding is None:
            # The original (kernel_size - 1) // 2 only preserved spatial
            # size for dilation == 1; scale by dilation for the general case.
            # Identical to the old value whenever dilation == 1.
            padding = dilation * (kernel_size - 1) // 2

        self.depthwise = DepthwiseConv2d(in_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias)
        self.pointwise = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=bias)

    def forward(self, x):
        x = self.depthwise(x)
        x = self.pointwise(x)
        return x


class AtrousSpatialPyramidPooling(nn.Module):
    """DeepLabV3 ASPP head.

    Runs five parallel branches over the input feature map — a 1x1 conv,
    three 3x3 atrous convs with increasing dilation, and global average
    image pooling — each producing 256 channels, then fuses the
    1280-channel concatenation back to 256 channels with a 1x1 conv,
    BatchNorm, ReLU and dropout.

    Args:
        in_channels: channel count of the incoming feature map.
        dilation_rate: three dilation rates for the atrous branches.
            (Tuple default instead of a mutable list default.)
    """

    def __init__(self, in_channels, dilation_rate=(12, 24, 36)):
        super(AtrousSpatialPyramidPooling, self).__init__()
        self.out_1x1 = nn.Sequential(
            nn.Conv2d(in_channels, 256, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True)
        )

        self.out_3x3_1 = self._make_block(in_channels, dilation_rate=dilation_rate[0])
        self.out_3x3_2 = self._make_block(in_channels, dilation_rate=dilation_rate[1])
        self.out_3x3_3 = self._make_block(in_channels, dilation_rate=dilation_rate[2])

        # Global-context branch: pool to 1x1, project, then upsample back.
        self.out_image_pooling_conv = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, 256, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True)
        )

        # 5 branches x 256 channels = 1280 input channels.
        self.final_conv = nn.Sequential(
            nn.Conv2d(1280, 256, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5)
        )

    def _make_block(self, in_channels, dilation_rate):
        """One atrous branch; padding == dilation keeps spatial size."""
        return nn.Sequential(
            nn.Conv2d(in_channels, 256, kernel_size=3, stride=1, padding=dilation_rate, dilation=dilation_rate, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        out_1x1 = self.out_1x1(x)

        out_3x3_1 = self.out_3x3_1(x)
        out_3x3_2 = self.out_3x3_2(x)
        out_3x3_3 = self.out_3x3_3(x)

        out_image_pooling = self.out_image_pooling_conv(x)
        # Broadcast the 1x1 global context back to the input's spatial size.
        out_image_pooling = F.interpolate(out_image_pooling, size=x.size()[2:], mode='bilinear', align_corners=False)

        x = torch.cat([out_1x1, out_3x3_1, out_3x3_2, out_3x3_3, out_image_pooling], dim=1)

        x = self.final_conv(x)

        return x


class ResBlock(nn.Module):
    """Xception-style residual block: three separable convs plus a shortcut.

    The shortcut is a strided 1x1 conv + BatchNorm when ``hasResConv`` is
    True; otherwise the identity, in which case the caller must keep
    channels and spatial size unchanged (strides=1, in == out).
    """

    def __init__(self, in_channels, out_channels, strides=2, hasResConv=True):
        super(ResBlock, self).__init__()

        if hasResConv:
            self.skip = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=strides, bias=False),
                nn.BatchNorm2d(out_channels),
            )
        else:
            self.skip = nn.Identity()

        # Only the last separable conv carries the stride.
        self.sepconv1 = SeparableConv2d(in_channels, out_channels, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.sepconv2 = SeparableConv2d(out_channels, out_channels, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.sepconv3 = SeparableConv2d(out_channels, out_channels, stride=strides, bias=False)
        self.bn3 = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        identity = self.skip(x)

        out = F.relu(self.bn1(self.sepconv1(x)))
        out = F.relu(self.bn2(self.sepconv2(out)))
        out = self.bn3(self.sepconv3(out))

        return F.relu(out + identity)


class Xception(nn.Module):
    """Modified Xception feature extractor (entry / middle / exit flow).

    forward() returns the final 2048-channel feature map together with the
    128-channel low-level features produced by the first residual block.

    NOTE(review): ``input_size`` is accepted but never used — kept only for
    interface compatibility with existing callers.
    """

    def __init__(self, input_size):
        super(Xception, self).__init__()

        # Entry flow: two plain convs, then three downsampling blocks.
        self.entry_conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.entry_conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(64)

        self.block1 = ResBlock(64, 128, strides=2)
        self.block2 = ResBlock(128, 256, strides=2)
        self.block3 = ResBlock(256, 728, strides=2)

        # Middle flow: 16 identity-shortcut blocks at 728 channels.
        self.middle_blocks = nn.ModuleList(
            ResBlock(728, 728, strides=1, hasResConv=False) for _ in range(16)
        )

        # Exit flow: one residual block then three separable convs.
        self.exit_block = ResBlock(728, 1024, strides=1)

        self.conv_exit1 = SeparableConv2d(1024, 1536, bias=False)
        self.bn_exit1 = nn.BatchNorm2d(1536)
        self.conv_exit2 = SeparableConv2d(1536, 1536, bias=False)
        self.bn_exit2 = nn.BatchNorm2d(1536)
        self.conv_exit3 = SeparableConv2d(1536, 2048, bias=False)
        self.bn_exit3 = nn.BatchNorm2d(2048)

    def forward(self, x):
        x = F.relu(self.bn1(self.entry_conv1(x)))
        x = F.relu(self.bn2(self.entry_conv2(x)))

        # block1's output doubles as the decoder's low-level feature tap.
        low_level_features = self.block1(x)
        x = self.block3(self.block2(low_level_features))

        for block in self.middle_blocks:
            x = block(x)

        x = self.exit_block(x)

        x = F.relu(self.bn_exit1(self.conv_exit1(x)))
        x = F.relu(self.bn_exit2(self.conv_exit2(x)))
        x = F.relu(self.bn_exit3(self.conv_exit3(x)))

        return x, low_level_features


class DeeplabV3Plus(nn.Module):
    """DeepLabV3+ semantic segmentation model with a ResNet-50 backbone.

    Args:
        cfg: configuration dict; must contain an integer 'num_classes'.

    Raises:
        ValueError: if cfg does not provide 'num_classes'.

    forward() returns a dict with key 'out' holding per-pixel class logits
    at the input resolution.
    """

    def __init__(self, cfg: Dict[str, dict]):
        super(DeeplabV3Plus, self).__init__()

        num_classes = cfg.get('num_classes')
        if num_classes is None:
            # Fail fast with a clear message instead of a cryptic Conv2d
            # error later in construction.
            raise ValueError("cfg must provide an integer 'num_classes'")

        # Backbone taps: layer3 -> 1024-ch 'out' (stride 16),
        #                layer1 ->  256-ch 'low_level' (stride 4).
        self.backbone = ResNet50Backbone()
        self.aspp = AtrousSpatialPyramidPooling(1024)

        # Project low-level features down to 48 channels before fusion.
        self.low_level_conv = nn.Conv2d(256, 48, kernel_size=1, bias=False)
        self.low_level_bn = nn.BatchNorm2d(48)

        # 256 (ASPP) + 48 (low level) = 304 fused channels.
        self.final_conv1 = nn.Conv2d(304, 256, kernel_size=3, padding=1, bias=False)
        self.final_bn1 = nn.BatchNorm2d(256)
        self.final_conv2 = nn.Conv2d(256, num_classes, kernel_size=1)

    def forward(self, x):
        size = x.size()[2:]

        x, low_level_features = self.backbone(x)  # 1024-ch, 256-ch
        x = self.aspp(x)  # -> 256 channels
        # Upsample ASPP output to the low-level feature resolution.
        x = F.interpolate(x, size=low_level_features.shape[-2:], mode='bilinear', align_corners=False)

        low_level_features = F.relu(self.low_level_bn(self.low_level_conv(low_level_features)))

        x = torch.cat([x, low_level_features], dim=1)

        x = F.relu(self.final_bn1(self.final_conv1(x)))
        # Restore full input resolution before the classifier.
        x = F.interpolate(x, size=size, mode='bilinear', align_corners=False)
        x = self.final_conv2(x)

        return {'out': x}
