"""
ResNet code gently borrowed from
https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
"""

from collections import OrderedDict
import math
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.utils import model_zoo
import copy
import numpy as np

__all__ = ['SENet', 'senet154', 'se_resnet50', 'se_resnet101', 'se_resnet152',
           'se_resnext50_32x4d', 'se_resnext101_32x4d']

pretrained_settings = {
    'senet154': {
        'imagenet': {
            'url': 'http://data.lip6.fr/cadene/pretrainedmodels/senet154-c7b49a05.pth',
            'input_space': 'RGB',
            'input_size': [3, 224, 224],
            'input_range': [0, 1],
            'mean': [0.485, 0.456, 0.406],
            'std': [0.229, 0.224, 0.225],
            'num_classes': 1000
        }
    },
    'se_resnet50': {
        'imagenet': {
            'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet50-ce0d4300.pth',
            'input_space': 'RGB',
            'input_size': [3, 224, 224],
            'input_range': [0, 1],
            'mean': [0.485, 0.456, 0.406],
            'std': [0.229, 0.224, 0.225],
            'num_classes': 1000
        }
    },
    'se_resnet101': {
        'imagenet': {
            'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet101-7e38fcc6.pth',
            'input_space': 'RGB',
            'input_size': [3, 224, 224],
            'input_range': [0, 1],
            'mean': [0.485, 0.456, 0.406],
            'std': [0.229, 0.224, 0.225],
            'num_classes': 1000
        }
    },
    'se_resnet152': {
        'imagenet': {
            'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet152-d17c99b7.pth',
            'input_space': 'RGB',
            'input_size': [3, 224, 224],
            'input_range': [0, 1],
            'mean': [0.485, 0.456, 0.406],
            'std': [0.229, 0.224, 0.225],
            'num_classes': 1000
        }
    },
    'se_resnext50_32x4d': {
        'imagenet': {
            'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth',
            'input_space': 'RGB',
            'input_size': [3, 224, 224],
            'input_range': [0, 1],
            'mean': [0.485, 0.456, 0.406],
            'std': [0.229, 0.224, 0.225],
            'num_classes': 1000
        }
    },
    'se_resnext101_32x4d': {
        'imagenet': {
            'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext101_32x4d-3b2fe3d8.pth',
            'input_space': 'RGB',
            'input_size': [3, 224, 224],
            'input_range': [0, 1],
            'mean': [0.485, 0.456, 0.406],
            'std': [0.229, 0.224, 0.225],
            'num_classes': 1000
        }
    },
}

def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding=1 (no bias); preserves spatial size at stride 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )

def downsample_conv(in_planes, out_planes, kernel_size=3):
    """Two 'same'-padded conv+ReLU layers; the first halves the resolution (stride 2)."""
    pad = (kernel_size - 1) // 2
    return nn.Sequential(
        nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                  stride=2, padding=pad),
        nn.ReLU(inplace=True),
        nn.Conv2d(out_planes, out_planes, kernel_size=kernel_size,
                  padding=pad),
        nn.ReLU(inplace=True),
    )

def predict_disp(in_planes):
    """Single-channel prediction head; sigmoid squashes the output into (0, 1)."""
    head = nn.Sequential(
        nn.Conv2d(in_planes, 1, kernel_size=3, padding=1),
        nn.Sigmoid(),
    )
    return head

def conv(in_planes, out_planes):
    """3x3 same-padding convolution followed by an in-place ReLU."""
    block = nn.Sequential(
        nn.Conv2d(in_planes, out_planes, kernel_size=3, padding=1),
        nn.ReLU(inplace=True),
    )
    return block

def upconv(in_planes, out_planes):
    """Transposed 3x3 convolution that exactly doubles spatial size, then ReLU."""
    block = nn.Sequential(
        nn.ConvTranspose2d(in_planes, out_planes, kernel_size=3,
                           stride=2, padding=1, output_padding=1),
        nn.ReLU(inplace=True),
    )
    return block

def crop_like(input, ref):
    """Crop `input`'s last two (spatial) dims down to `ref`'s spatial size."""
    h, w = ref.size(2), ref.size(3)
    assert(input.size(2) >= h and input.size(3) >= w)
    return input[:, :, :h, :w]

def make_layer(inplanes, block, planes, blocks, groups, reduction, stride=1):
    """Stack `blocks` instances of `block`; only the first may stride/downsample."""
    out_channels = planes * block.expansion
    downsample = None
    if stride != 1 or inplanes != out_channels:
        # Projection shortcut so the residual addition has matching shapes.
        downsample = nn.Sequential(
            nn.Conv2d(inplanes, out_channels,
                      kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm2d(out_channels),
        )

    first = block(inplanes, planes, groups=groups, reduction=reduction,
                  stride=stride, downsample=downsample)
    rest = [block(out_channels, planes, groups=groups, reduction=reduction)
            for _ in range(1, blocks)]
    return nn.Sequential(first, *rest)




class DispSENet(nn.Module):
    """DispNet-style disparity estimator built from SE bottleneck blocks.

    Encoder: a strided conv stem (`conv1`) followed by four SE bottleneck
    stages (`conv2`..`conv5`), each halving the spatial resolution.
    Decoder: transposed convolutions with encoder skip connections, predicting
    a disparity map at four scales (coarser disparities are fed back as an
    extra input channel to the next refinement stage).

    Args:
        block (str or type): name of a bottleneck class defined in this
            module (e.g. 'SEResNetBottleneck'), or the class itself.
        groups (int): group count forwarded to the bottleneck convolutions.
        reduction (int): channel reduction ratio of the SE modules.
        alpha, beta (float): the sigmoid disparity output is mapped to
            `alpha * sigmoid + beta`, i.e. the open range (beta, alpha + beta).
    """

    def __init__(self, block, groups, reduction, alpha=10, beta=0.01):

        super(DispSENet, self).__init__()

        self.alpha = alpha
        self.beta = beta
        # Resolve the block class by name from this module's namespace.
        # This replaces the original `eval(block)`, which would execute
        # arbitrary code if `block` ever came from untrusted input, while
        # still accepting the same class-name strings (or a class directly).
        if isinstance(block, str):
            try:
                block_cls = globals()[block]
            except KeyError:
                raise ValueError('unknown block class: {!r}'.format(block))
        else:
            block_cls = block

        conv_planes = [32, 64, 128, 256, 512]
        # Encoder. The `*4` factors below account for the bottleneck
        # expansion factor (block_cls.expansion == 4).
        self.conv1 = downsample_conv(3, conv_planes[0], kernel_size=7)
        self.conv2 = make_layer(conv_planes[0], block_cls, conv_planes[1], blocks=2, groups=groups, reduction=reduction, stride=2)
        self.conv3 = make_layer(conv_planes[1]*4, block_cls, conv_planes[2], blocks=2, groups=groups, reduction=reduction, stride=2)
        self.conv4 = make_layer(conv_planes[2]*4, block_cls, conv_planes[3], blocks=3, groups=groups, reduction=reduction, stride=2)
        self.conv5 = make_layer(conv_planes[3]*4, block_cls, conv_planes[4], blocks=3, groups=groups, reduction=reduction, stride=2)

        # Decoder: each upconv doubles resolution; each iconv fuses the
        # upsampled features with the matching encoder skip connection.
        upconv_planes = [512, 256, 128, 64, 32]
        self.upconv5 = upconv(conv_planes[4]*4, upconv_planes[0])
        self.upconv4 = upconv(upconv_planes[0]*4, upconv_planes[1])
        self.upconv3 = upconv(upconv_planes[1]*4, upconv_planes[2])
        self.upconv2 = upconv(upconv_planes[2]*4, upconv_planes[3])
        self.upconv1 = upconv(upconv_planes[3]*4, upconv_planes[4])

        # The `1 +` in iconv3/2/1 is the upsampled coarser disparity channel.
        self.iconv5 = make_layer(upconv_planes[0] + conv_planes[3] * 4, block_cls, upconv_planes[0], blocks=2,
                                 groups=groups, reduction=reduction, stride=1)
        self.iconv4 = make_layer(upconv_planes[1] + conv_planes[2] * 4, block_cls, upconv_planes[1], blocks=2,
                                 groups=groups, reduction=reduction, stride=1)
        self.iconv3 = make_layer(1 + upconv_planes[2] + conv_planes[1] * 4, block_cls, upconv_planes[2], blocks=1,
                                 groups=groups, reduction=reduction, stride=1)
        self.iconv2 = make_layer(1 + upconv_planes[3] + conv_planes[0], block_cls, upconv_planes[3], blocks=1,
                                 groups=groups, reduction=reduction, stride=1)
        self.iconv1 = make_layer(1 + upconv_planes[4], block_cls, upconv_planes[4], blocks=1,
                                 groups=groups, reduction=reduction, stride=1)

        # One sigmoid disparity head per decoder scale.
        self.predict_disp4 = predict_disp(upconv_planes[1]*4)
        self.predict_disp3 = predict_disp(upconv_planes[2]*4)
        self.predict_disp2 = predict_disp(upconv_planes[3]*4)
        self.predict_disp1 = predict_disp(upconv_planes[4]*4)

    def init_weights(self):
        """Xavier-initialize all (transposed) conv weights and zero their biases."""
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
                nn.init.xavier_uniform_(m.weight)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)

    def forward(self, x):
        """Return (disp1, disp2, disp3, disp4) in training mode, else disp1 only.

        disp1 matches the input resolution; each subsequent scale is coarser
        by a factor of 2.
        """
        # Encoder (each stage halves spatial resolution).
        out_conv1 = self.conv1(x)
        out_conv2 = self.conv2(out_conv1)
        out_conv3 = self.conv3(out_conv2)
        out_conv4 = self.conv4(out_conv3)
        out_conv5 = self.conv5(out_conv4)

        # Decoder. crop_like guards against one-pixel size mismatches when
        # the input spatial size does not divide evenly through the encoder.
        out_upconv5 = crop_like(self.upconv5(out_conv5), out_conv4)
        concat5 = torch.cat((out_upconv5, out_conv4), 1)
        out_iconv5 = self.iconv5(concat5)

        out_upconv4 = crop_like(self.upconv4(out_iconv5), out_conv3)
        concat4 = torch.cat((out_upconv4, out_conv3), 1)
        out_iconv4 = self.iconv4(concat4)
        disp4 = self.alpha * self.predict_disp4(out_iconv4) + self.beta

        # From here on, the upsampled coarser disparity is concatenated as an
        # extra input channel to guide the next (finer) refinement stage.
        out_upconv3 = crop_like(self.upconv3(out_iconv4), out_conv2)
        disp4_up = crop_like(F.interpolate(disp4, scale_factor=2, mode='bilinear', align_corners=False), out_conv2)
        concat3 = torch.cat((out_upconv3, out_conv2, disp4_up), 1)
        out_iconv3 = self.iconv3(concat3)
        disp3 = self.alpha * self.predict_disp3(out_iconv3) + self.beta

        out_upconv2 = crop_like(self.upconv2(out_iconv3), out_conv1)
        disp3_up = crop_like(F.interpolate(disp3, scale_factor=2, mode='bilinear', align_corners=False), out_conv1)
        concat2 = torch.cat((out_upconv2, out_conv1, disp3_up), 1)
        out_iconv2 = self.iconv2(concat2)
        disp2 = self.alpha * self.predict_disp2(out_iconv2) + self.beta

        out_upconv1 = crop_like(self.upconv1(out_iconv2), x)
        disp2_up = crop_like(F.interpolate(disp2, scale_factor=2, mode='bilinear', align_corners=False), x)
        concat1 = torch.cat((out_upconv1, disp2_up), 1)
        out_iconv1 = self.iconv1(concat1)
        disp1 = self.alpha * self.predict_disp1(out_iconv1) + self.beta

        if self.training:
            return disp1, disp2, disp3, disp4
        else:
            return disp1



class SEModule(nn.Module):
    """Squeeze-and-Excitation channel attention: globally pool, squeeze to
    `channels // reduction`, expand back, and gate the input per channel."""

    def __init__(self, channels, reduction):
        super(SEModule, self).__init__()
        # Squeeze: global average pool to a 1x1 map per channel.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # Excitation: bottleneck MLP implemented as two 1x1 convolutions.
        self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1,
                             padding=0)
        self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1,
                             padding=0)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Per-channel gate in (0, 1), broadcast over the spatial dimensions.
        gate = self.avg_pool(x)
        gate = self.relu(self.fc1(gate))
        gate = self.sigmoid(self.fc2(gate))
        return x * gate


class Bottleneck(nn.Module):
    """
    Base class for bottlenecks that implements `forward()` method.

    Subclasses provide conv1/conv2/conv3, relu, se_module and (optionally)
    downsample. The batch-norm calls present in the reference implementation
    are disabled in this variant.
    """

    def forward(self, x):
        # Identity shortcut, projected when a downsample module is supplied.
        shortcut = x if self.downsample is None else self.downsample(x)

        out = self.relu(self.conv1(x))
        out = self.relu(self.conv2(out))
        out = self.conv3(out)

        # SE gating is applied to the residual branch before the addition.
        out = self.se_module(out) + shortcut
        return self.relu(out)


class SEBottleneck(Bottleneck):
    """
    Bottleneck for SENet154: the first 1x1 conv widens to 2*planes and the
    grouped 3x3 conv expands to the final 4*planes width.
    """
    expansion = 4

    def __init__(self, inplanes, planes, groups, reduction, stride=1,
                 downsample=None):
        super(SEBottleneck, self).__init__()
        mid = planes * 2   # widened intermediate width (SENet-154 variant)
        out = planes * 4   # expanded output width
        self.conv1 = nn.Conv2d(inplanes, mid, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(mid)
        self.conv2 = nn.Conv2d(mid, out, kernel_size=3, stride=stride,
                               padding=1, groups=groups, bias=False)
        self.bn2 = nn.BatchNorm2d(out)
        self.conv3 = nn.Conv2d(out, out, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out)
        self.relu = nn.ReLU(inplace=True)
        self.se_module = SEModule(out, reduction=reduction)
        self.downsample = downsample
        self.stride = stride


class SEResNetBottleneck(Bottleneck):
    """
    ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe
    implementation and uses `stride=stride` in `conv1` and not in `conv2`
    (the latter is used in the torchvision implementation of ResNet).

    Batch-norm layers are omitted here, matching the bn-free forward of the
    base `Bottleneck`.
    """
    expansion = 4

    def __init__(self, inplanes, planes, groups, reduction, stride=1,
                 downsample=None):
        super(SEResNetBottleneck, self).__init__()
        out = planes * 4  # expanded output width
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False,
                               stride=stride)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1,
                               groups=groups, bias=False)
        self.conv3 = nn.Conv2d(planes, out, kernel_size=1, bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.se_module = SEModule(out, reduction=reduction)
        self.downsample = downsample
        self.stride = stride


class SEResNeXtBottleneck(Bottleneck):
    """
    ResNeXt bottleneck type C with a Squeeze-and-Excitation module.
    """
    expansion = 4

    def __init__(self, inplanes, planes, groups, reduction, stride=1,
                 downsample=None, base_width=4):
        super(SEResNeXtBottleneck, self).__init__()
        # Grouped-conv width; e.g. for "32x4d": groups=32, base_width=4.
        width = int(planes * base_width / 64) * groups
        out = planes * 4  # expanded output width
        self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False,
                               stride=1)
        self.bn1 = nn.BatchNorm2d(width)
        self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride,
                               padding=1, groups=groups, bias=False)
        self.bn2 = nn.BatchNorm2d(width)
        self.conv3 = nn.Conv2d(width, out, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out)
        self.relu = nn.ReLU(inplace=True)
        self.se_module = SEModule(out, reduction=reduction)
        self.downsample = downsample
        self.stride = stride


def initialize_pretrained_model(model, num_classes, settings):
    """Load pretrained weights from `settings['url']` and attach the
    preprocessing metadata (input space/size/range, mean, std) to `model`.

    Raises AssertionError when `num_classes` disagrees with the checkpoint.
    """
    expected = settings['num_classes']
    assert num_classes == expected, \
        'num_classes should be {}, but is {}'.format(expected, num_classes)
    state = model_zoo.load_url(settings['url'], 'pretrained_model/encoder')
    model.load_state_dict(state)
    for key in ('input_space', 'input_size', 'input_range', 'mean', 'std'):
        setattr(model, key, settings[key])


def senet154(num_classes=1000, pretrained='imagenet'):
    """Build SENet-154; optionally load ImageNet weights (`pretrained=None` skips).

    NOTE(review): `SENet` is not defined in this module chunk — presumably
    provided elsewhere; verify before use.
    """
    model = SENet(SEBottleneck, [3, 8, 36, 3], groups=64, reduction=16,
                  dropout_p=0.2, num_classes=num_classes)
    if pretrained is not None:
        initialize_pretrained_model(
            model, num_classes, pretrained_settings['senet154'][pretrained])
    return model


def se_resnet50(num_classes=1000, pretrained='imagenet'):
    """Build SE-ResNet-50; optionally load ImageNet weights (`pretrained=None` skips)."""
    model = SENet(SEResNetBottleneck, [3, 4, 6, 3], groups=1, reduction=16,
                  dropout_p=None, inplanes=64, input_3x3=False,
                  downsample_kernel_size=1, downsample_padding=0,
                  num_classes=num_classes)
    if pretrained is not None:
        initialize_pretrained_model(
            model, num_classes, pretrained_settings['se_resnet50'][pretrained])
    return model


def se_resnet101(num_classes=1000, pretrained='imagenet'):
    """Build SE-ResNet-101; optionally load ImageNet weights (`pretrained=None` skips)."""
    model = SENet(SEResNetBottleneck, [3, 4, 23, 3], groups=1, reduction=16,
                  dropout_p=None, inplanes=64, input_3x3=False,
                  downsample_kernel_size=1, downsample_padding=0,
                  num_classes=num_classes)
    if pretrained is not None:
        initialize_pretrained_model(
            model, num_classes, pretrained_settings['se_resnet101'][pretrained])
    return model


def se_resnet152(num_classes=1000, pretrained='imagenet'):
    """Build SE-ResNet-152; optionally load ImageNet weights (`pretrained=None` skips)."""
    model = SENet(SEResNetBottleneck, [3, 8, 36, 3], groups=1, reduction=16,
                  dropout_p=None, inplanes=64, input_3x3=False,
                  downsample_kernel_size=1, downsample_padding=0,
                  num_classes=num_classes)
    if pretrained is not None:
        initialize_pretrained_model(
            model, num_classes, pretrained_settings['se_resnet152'][pretrained])
    return model


def se_resnext50_32x4d(num_classes=1000, pretrained='imagenet'):
    """Build SE-ResNeXt-50 (32x4d); optionally load ImageNet weights (`pretrained=None` skips)."""
    model = SENet(SEResNeXtBottleneck, [3, 4, 6, 3], groups=32, reduction=16,
                  dropout_p=None, inplanes=64, input_3x3=False,
                  downsample_kernel_size=1, downsample_padding=0,
                  num_classes=num_classes)
    if pretrained is not None:
        initialize_pretrained_model(
            model, num_classes,
            pretrained_settings['se_resnext50_32x4d'][pretrained])
    return model


def se_resnext101_32x4d(num_classes=1000, pretrained='imagenet'):
    """Build SE-ResNeXt-101 (32x4d); optionally load ImageNet weights (`pretrained=None` skips)."""
    model = SENet(SEResNeXtBottleneck, [3, 4, 23, 3], groups=32, reduction=16,
                  dropout_p=None, inplanes=64, input_3x3=False,
                  downsample_kernel_size=1, downsample_padding=0,
                  num_classes=num_classes)
    if pretrained is not None:
        initialize_pretrained_model(
            model, num_classes,
            pretrained_settings['se_resnext101_32x4d'][pretrained])
    return model
