import math
from math import ceil

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn import BatchNorm2d as BatchNorm
from torchvision.models import resnet50

from icnet_utils import *


class icnet(nn.Module):
    """
    Image Cascade Network (ICNet)
    URL: https://arxiv.org/abs/1704.08545
    References:
    1) Original Author's code: https://github.com/hszhao/ICNet
    2) Chainer implementation by @mitmul: https://github.com/mitmul/chainer-pspnet
    3) TensorFlow implementation by @hellochick: https://github.com/hellochick/ICNet-tensorflow

    Three-branch cascade:
      * sub2: input at 1/2 scale through the shared shallow encoder;
      * sub4: sub2's features downscaled again, through the deep dilated
        residual blocks and pyramid pooling;
      * sub1: the full-resolution input through a lightweight 3-conv stem.
    Branches are merged bottom-up by cascadeFeatureFusion units, each of
    which also emits an auxiliary logit map used for deep supervision.
    """

    def __init__(self,
                 n_classes=19,
                 block_config=(3, 4, 6, 3),
                 with_bn=True):
        """
        :param n_classes: number of segmentation classes (19 = Cityscapes).
        :param block_config: per-stage residual block counts (ResNet-50
            style 3-4-6-3). A tuple default replaces the original mutable
            list default; any indexable sequence of four ints works.
        :param with_bn: use BatchNorm inside the conv blocks. When BN is
            enabled, conv biases are disabled — BN's affine shift makes a
            preceding conv bias redundant.
        """
        super(icnet, self).__init__()
        bias = not with_bn

        # Shared encoder stem (feeds the sub2/sub4 branches)
        self.convbnrelu1_1 = conv2DBatchNormRelu(in_channels=3, k_size=3, n_filters=16,
                                                 padding=1, stride=2, bias=bias, with_bn=with_bn)
        self.convbnrelu1_2 = conv2DBatchNormRelu(in_channels=16, k_size=3, n_filters=16,
                                                 padding=1, stride=1, bias=bias, with_bn=with_bn)
        self.convbnrelu1_3 = conv2DBatchNormRelu(in_channels=16, k_size=3, n_filters=32,
                                                 padding=1, stride=1, bias=bias, with_bn=with_bn)

        # Vanilla residual blocks. Stage 3 is split so the sub2 branch can
        # stop at the conv part while the sub4 branch continues with the
        # identity part on a further-downsampled copy.
        self.res_block2 = residualBlockPSP(block_config[0], 32, 16, 64, 1, 1, with_bn=with_bn)
        self.res_block3_conv = residualBlockPSP(block_config[1], 64, 32, 128, 2, 1, include_range='conv',
                                                with_bn=with_bn)
        self.res_block3_identity = residualBlockPSP(block_config[1], 64, 32, 128, 2, 1, include_range='identity',
                                                    with_bn=with_bn)

        # Dilated residual blocks (stride 1, dilation 2 and 4) keep the
        # sub4 branch's spatial resolution while growing the receptive field.
        self.res_block4 = residualBlockPSP(block_config[2], 128, 64, 256, 1, 2, with_bn=with_bn)
        self.res_block5 = residualBlockPSP(block_config[3], 256, 128, 512, 1, 4, with_bn=with_bn)

        # Pyramid Pooling Module (sum fusion, ICNet flavor)
        self.pyramid_pooling = pyramidPooling(512, [6, 3, 2, 1], model_name='icnet', fusion_mode='sum', with_bn=with_bn)

        # 1x1 projection after pyramid pooling in the sub4 branch
        self.conv5_4_k1 = conv2DBatchNormRelu(in_channels=512, k_size=1, n_filters=128,
                                              padding=0, stride=1, bias=bias, with_bn=with_bn)

        # High-resolution (sub1) branch: three stride-2 convs -> 1/8 scale
        self.convbnrelu1_sub1 = conv2DBatchNormRelu(in_channels=3, k_size=3, n_filters=16,
                                                    padding=1, stride=2, bias=bias, with_bn=with_bn)
        self.convbnrelu2_sub1 = conv2DBatchNormRelu(in_channels=16, k_size=3, n_filters=16,
                                                    padding=1, stride=2, bias=bias, with_bn=with_bn)
        self.convbnrelu3_sub1 = conv2DBatchNormRelu(in_channels=16, k_size=3, n_filters=32,
                                                    padding=1, stride=2, bias=bias, with_bn=with_bn)

        # Final per-pixel classifier on the fully fused features
        self.classification = nn.Conv2d(64, n_classes, 1, 1, 0)

        # Cascade Feature Fusion units (each returns fused features plus an
        # auxiliary classifier map for deep supervision)
        self.cff_sub24 = cascadeFeatureFusion(n_classes, 128, 128, 64, with_bn=with_bn)
        self.cff_sub12 = cascadeFeatureFusion(n_classes, 64, 32, 64, with_bn=with_bn)

        # He (Kaiming) initialization for convs; identity-style init for BN
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / fan_out))
            elif isinstance(m, BatchNorm):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, x):
        """
        :param x: NCHW image batch with 3 channels (per the stem's
            in_channels). Presumably H and W should be multiples of 32 so
            the branch resolutions align — TODO confirm against interp /
            get_interp_size in icnet_utils.
        :return: training mode: tuple (sub4_cls, sub24_cls, sub124_cls) of
            auxiliary and final logit maps; eval mode: the final logit map
            only (at 1/4 of the input resolution).
        """
        # sub2 branch: H, W -> H/2, W/2
        x_sub2 = interp(x, output_size=get_interp_size(x, s_factor=2))

        # H/2, W/2 -> H/4, W/4 (strided stem)
        x_sub2 = self.convbnrelu1_1(x_sub2)
        x_sub2 = self.convbnrelu1_2(x_sub2)
        x_sub2 = self.convbnrelu1_3(x_sub2)

        # H/4, W/4 -> H/8, W/8
        x_sub2 = F.max_pool2d(x_sub2, 3, 2, 1)

        # H/8, W/8 -> H/16, W/16 (res_block3_conv is strided)
        x_sub2 = self.res_block2(x_sub2)
        x_sub2 = self.res_block3_conv(x_sub2)

        # sub4 branch continues from sub2's features: H/16 -> H/32
        x_sub4 = interp(x_sub2, output_size=get_interp_size(x_sub2, s_factor=2))
        x_sub4 = self.res_block3_identity(x_sub4)

        x_sub4 = self.res_block4(x_sub4)
        x_sub4 = self.res_block5(x_sub4)
        x_sub4 = self.pyramid_pooling(x_sub4)
        x_sub4 = self.conv5_4_k1(x_sub4)

        # sub1 branch on the full-resolution input
        x_sub1 = self.convbnrelu1_sub1(x)
        x_sub1 = self.convbnrelu2_sub1(x_sub1)
        x_sub1 = self.convbnrelu3_sub1(x_sub1)

        # Cascade fusion, low-res -> high-res
        x_sub24, sub4_cls = self.cff_sub24(x_sub4, x_sub2)
        x_sub12, sub24_cls = self.cff_sub12(x_sub24, x_sub1)

        # F.interpolate replaces the deprecated F.upsample; upsample is a
        # thin forwarding alias, so the numerics are identical.
        x_sub12 = F.interpolate(x_sub12, size=get_interp_size(x_sub12, z_factor=2), mode='bilinear')

        sub124_cls = self.classification(x_sub12)
        if self.training:
            return sub4_cls, sub24_cls, sub124_cls
        # eval mode: only the fused prediction; upsample externally if a
        # full-resolution map is needed.
        return sub124_cls


def icnet_portrait(num_classes=2, pretrained=True):
    """Build an ICNet with a ``num_classes``-way classification head.

    NOTE: ``pretrained`` is currently a no-op — the Cityscapes checkpoint
    loading below is disabled (it pointed at a machine-local .pth.tar).
    """
    model = icnet()
    # if pretrained:
    #     model.load_state_dict(torch.load("/home/detao/Desktop/segmentation/pytorch-segmentation/model/icnet/icnet_cityscapes_trainval_90k.pth.tar"))
    # Swap the 19-class Cityscapes head for the requested class count.
    model.classification = nn.Conv2d(64, num_classes, kernel_size=1, stride=1, padding=0)
    return model


# For Testing Purposes only
if __name__ == '__main__':
    import time
    # NOTE(review): scipy.misc image helpers (imread/imresize/imsave) were
    # removed in SciPy >= 1.2 — this smoke test needs an old SciPy (with
    # Pillow) or a port to imageio/PIL.
    import scipy.misc as m

    model = icnet_portrait()
    model.float()
    model.eval()

    image_test = "/home/detao/Desktop/seg_inference/seg/val/0567.jpg"
    img = m.imread(image_test)
    m.imsave('test_input.png', img)

    # Preprocess: resize to the network's test size, HWC -> CHW, float,
    # subtract the ImageNet RGB mean, then RGB -> BGR channel flip.
    img = m.imresize(img, (385, 385))  # uint8 with RGB mode
    img = img.transpose(2, 0, 1)
    img = img.astype(np.float64)
    img -= np.array([123.68, 116.779, 103.939])[:, None, None]
    img = np.copy(img[::-1, :, :])
    img = torch.from_numpy(img).float()
    img = img.unsqueeze(0)

    print(img.size())
    img = Variable(img)
    starttime = time.time()
    # no_grad: without it each of the 100 forward passes builds and keeps
    # an autograd graph, inflating memory and distorting the timing.
    with torch.no_grad():
        for i in range(100):
            out = model(img)
    print(time.time() - starttime)
    print(out.size())

    out = F.softmax(out, dim=1).data.cpu().numpy()
    print(out.shape)
    pred = np.argmax(out, axis=1)[0]
    print(pred.shape)