import torch
import torch.nn.functional as F
from timm.models.layers import create_classifier
from torch import nn as nn

from training.models.aspp import ASPP
from training.models.xception_aligned import xception65

__all__ = ['deeplab_v3_plus']


class Decoder(nn.Module):
    """DeepLabV3+ decoder head.

    Projects low-level encoder features down to 48 channels, fuses them
    with 4x-upsampled ASPP features, refines the result with two 3x3
    convolutions, and emits a single-channel mask upsampled another 4x.
    """

    def __init__(self, low_level_inplanes):
        super(Decoder, self).__init__()

        # 1x1 projection of the low-level feature map to 48 channels.
        self.conv1 = nn.Conv2d(low_level_inplanes, 48, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(48)
        self.relu = nn.ReLU()

        # Bring ASPP features up to the low-level feature resolution.
        self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear', align_corners=True)

        # Two 3x3 refinement convs (48 + 256 = 304 input channels),
        # then a 1x1 conv predicting a single mask channel.
        refinement = [
            nn.Conv2d(304, 256, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Conv2d(256, 1, kernel_size=1, stride=1),
        ]
        self.last_conv = nn.Sequential(*refinement)

        # Final 4x upsample toward the input resolution.
        self.upsample2 = nn.Upsample(scale_factor=4, mode='bilinear', align_corners=True)

        self._init_weight()

    def forward(self, low_level_feat, aspp_feat):
        """Fuse low-level and ASPP features into a full-resolution mask."""
        projected = self.relu(self.bn1(self.conv1(low_level_feat)))
        upsampled = self.upsample1(aspp_feat)
        fused = torch.cat((projected, upsampled), dim=1)
        return self.upsample2(self.last_conv(fused))

    def _init_weight(self):
        """Kaiming-init conv weights; unit scale / zero shift for batch norm."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                torch.nn.init.kaiming_normal_(module.weight)
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()


class DeepLabV3Plus(nn.Module):

    def __init__(self, output_stride=16, num_classes=2, num_features=2048, drop_rate=0., pretrained=False):
        super(DeepLabV3Plus, self).__init__()

        self.num_classes = num_classes
        self.num_features = num_features
        self.drop_rate = drop_rate
        self.low_level_inplanes = 128

        self.encoder = xception65(pretrained=pretrained, output_stride=output_stride, features_only=True)
        self.global_pool, self.classifier = create_classifier(self.num_features, self.num_classes, pool_type='avg')
        self.aspp = ASPP(in_channels=2048, atrous_rates=[12, 24, 36])
        self.decoder = Decoder(self.low_level_inplanes)

    def forward(self, inputs):
        low_level_feat, high_level_feat = self.encoder(inputs)
        aspp_feat = self.aspp(high_level_feat)
        mask = self.decoder(low_level_feat, aspp_feat)
        x = self.global_pool(high_level_feat)
        if self.drop_rate > 0.:
            x = F.dropout(x, p=self.drop_rate, training=self.training)
        x = self.classifier(x)
        return x, mask

    def _get_params(self, modules):
        for i in range(len(modules)):
            for m in modules[i].named_modules():
                if self.freeze_bn:
                    if isinstance(m[1], nn.Conv2d):
                        for p in m[1].parameters():
                            if p.requires_grad:
                                yield p

    def get_1x_lr_params(self):
        modules = [self.encoder, self.classifier]
        yield from self._get_params(modules)

    def get_10x_lr_params(self):
        modules = [self.aspp, self.decoder]
        yield from self._get_params(modules)


def deeplab_v3_plus(num_classes=2, pretrained=False):
    """Factory for :class:`DeepLabV3Plus`.

    Args:
        num_classes: number of output classes for the classifier head.
        pretrained: load pretrained backbone weights when True.

    Returns:
        A ``DeepLabV3Plus`` instance with the backbone's ``default_cfg``
        exposed at the model level, mirroring timm factory conventions.
    """
    net = DeepLabV3Plus(num_classes=num_classes, pretrained=pretrained)
    net.default_cfg = net.encoder.default_cfg
    return net
