import math
from math import ceil

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable

from icnet_utils import *

from torch.nn import BatchNorm2d as BatchNorm
from torchvision.models import resnet50
class icnet(nn.Module):
    """
    Image Cascade Network (ICNet) for semantic segmentation.

    Paper: https://arxiv.org/abs/1704.08545
    References:
    1) Original author's code: https://github.com/hszhao/ICNet
    2) Chainer implementation by @mitmul: https://github.com/mitmul/chainer-pspnet
    3) TensorFlow implementation by @hellochick: https://github.com/hellochick/ICNet-tensorflow

    Three-resolution cascade: the sub4 (1/4-scale input) branch runs the deep
    PSPNet-style backbone with pyramid pooling, the sub2 (1/2-scale) branch
    shares the backbone's early stages, and the sub1 (full-scale) branch uses
    three light stride-2 convolutions. Branches are merged bottom-up with
    cascade feature fusion (CFF) units, each of which also emits an auxiliary
    classifier used for the cascade label-guidance loss during training.
    """

    def __init__(self,
                 n_classes=19,
                 block_config=(3, 4, 6, 3),
                 with_bn=True):
        """
        Args:
            n_classes: number of segmentation classes (default 19, Cityscapes).
            block_config: residual-unit count per backbone stage
                (default (3, 4, 6, 3), the ResNet-50 layout).
            with_bn: if True, conv blocks include BatchNorm and the
                convolutions are bias-free; if False, biases are enabled.
        """
        super(icnet, self).__init__()
        # A conv followed by BatchNorm does not need a bias term.
        bias = not with_bn

        # --- Encoder stem, shared by the sub2/sub4 branches ---
        self.convbnrelu1_1 = conv2DBatchNormRelu(in_channels=3, k_size=3, n_filters=32,
                                                 padding=1, stride=2, bias=bias, with_bn=with_bn)
        self.convbnrelu1_2 = conv2DBatchNormRelu(in_channels=32, k_size=3, n_filters=32,
                                                 padding=1, stride=1, bias=bias, with_bn=with_bn)
        self.convbnrelu1_3 = conv2DBatchNormRelu(in_channels=32, k_size=3, n_filters=64,
                                                 padding=1, stride=1, bias=bias, with_bn=with_bn)

        # --- Vanilla residual blocks ---
        # Stage 3 is split: its strided "conv" unit runs in the sub2 branch,
        # the remaining "identity" units run on the further-downsampled sub4 path.
        self.res_block2 = residualBlockPSP(block_config[0], 64, 32, 128, 1, 1, with_bn=with_bn)
        self.res_block3_conv = residualBlockPSP(block_config[1], 128, 64, 256, 2, 1, include_range='conv', with_bn=with_bn)
        self.res_block3_identity = residualBlockPSP(block_config[1], 128, 64, 256, 2, 1, include_range='identity', with_bn=with_bn)

        # --- Dilated residual blocks (stride 1, dilation 2 and 4) ---
        self.res_block4 = residualBlockPSP(block_config[2], 256, 128, 512, 1, 2, with_bn=with_bn)
        self.res_block5 = residualBlockPSP(block_config[3], 512, 256, 1024, 1, 4, with_bn=with_bn)

        # --- Pyramid pooling module (sum fusion over 6/3/2/1 bins) ---
        self.pyramid_pooling = pyramidPooling(1024, [6, 3, 2, 1], model_name='icnet', fusion_mode='sum', with_bn=with_bn)

        # Final 1x1 conv of the sub4 branch: 1024 -> 256 channels.
        self.conv5_4_k1 = conv2DBatchNormRelu(in_channels=1024, k_size=1, n_filters=256,
                                              padding=0, stride=1, bias=bias, with_bn=with_bn)

        # --- High-resolution (sub1) branch: three stride-2 convs, H/8 output ---
        self.convbnrelu1_sub1 = conv2DBatchNormRelu(in_channels=3, k_size=3, n_filters=32,
                                                    padding=1, stride=2, bias=bias, with_bn=with_bn)
        self.convbnrelu2_sub1 = conv2DBatchNormRelu(in_channels=32, k_size=3, n_filters=32,
                                                    padding=1, stride=2, bias=bias, with_bn=with_bn)
        self.convbnrelu3_sub1 = conv2DBatchNormRelu(in_channels=32, k_size=3, n_filters=64,
                                                    padding=1, stride=2, bias=bias, with_bn=with_bn)
        self.classification = nn.Conv2d(128, n_classes, 1, 1, 0)

        # --- Cascade feature fusion units ---
        self.cff_sub24 = cascadeFeatureFusion(n_classes, 256, 256, 128, with_bn=with_bn)
        self.cff_sub12 = cascadeFeatureFusion(n_classes, 128, 64, 128, with_bn=with_bn)

        # He (Kaiming) normal initialization for convs; identity-like init for BN.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, BatchNorm):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, x):
        """
        Args:
            x: input image tensor of shape (N, 3, H, W). Spatial sizes must
               survive the /32 downsampling of the sub4 branch — TODO confirm
               against `get_interp_size`/`interp` in icnet_utils.

        Returns:
            In training mode: (sub4_cls, sub24_cls, sub124_cls), the auxiliary
            and final classifier logits at increasing resolutions, for the
            cascade label-guidance loss.
            In eval mode: sub124_cls only.
        """
        # H, W -> H/2, W/2
        x_sub2 = interp(x, output_size=get_interp_size(x, s_factor=2))

        # H/2, W/2 -> H/4, W/4 (stride-2 stem conv)
        x_sub2 = self.convbnrelu1_1(x_sub2)
        x_sub2 = self.convbnrelu1_2(x_sub2)
        x_sub2 = self.convbnrelu1_3(x_sub2)

        # H/4, W/4 -> H/8, W/8
        x_sub2 = F.max_pool2d(x_sub2, 3, 2, 1)

        # H/8, W/8 -> H/16, W/16 (strided unit of stage 3)
        x_sub2 = self.res_block2(x_sub2)
        x_sub2 = self.res_block3_conv(x_sub2)

        # H/16, W/16 -> H/32, W/32: downsample again, then the deep sub4 path
        x_sub4 = interp(x_sub2, output_size=get_interp_size(x_sub2, s_factor=2))
        x_sub4 = self.res_block3_identity(x_sub4)

        x_sub4 = self.res_block4(x_sub4)
        x_sub4 = self.res_block5(x_sub4)
        x_sub4 = self.pyramid_pooling(x_sub4)
        x_sub4 = self.conv5_4_k1(x_sub4)

        # High-resolution branch: H, W -> H/8, W/8
        x_sub1 = self.convbnrelu1_sub1(x)
        x_sub1 = self.convbnrelu2_sub1(x_sub1)
        x_sub1 = self.convbnrelu3_sub1(x_sub1)

        # Cascade feature fusion, coarse to fine; each CFF also yields an
        # auxiliary classifier on its low-resolution input.
        x_sub24, sub4_cls = self.cff_sub24(x_sub4, x_sub2)
        x_sub12, sub24_cls = self.cff_sub12(x_sub24, x_sub1)

        # Upsample fused features x2 before the final classifier.
        # NOTE(review): F.upsample is deprecated in favor of F.interpolate in
        # newer PyTorch; kept as-is to match this file's API vintage.
        x_sub12 = F.upsample(x_sub12, size=get_interp_size(x_sub12, z_factor=2), mode='bilinear')

        sub124_cls = self.classification(x_sub12)
        if self.training:
            return sub4_cls, sub24_cls, sub124_cls
        else:  # eval mode: only the final prediction (still at 1/4 input scale)
            return sub124_cls

def icnet_portrait(num_classes=2, pretrained=False):
    """
    Build an ICNet whose classifier head outputs `num_classes` maps.

    Args:
        num_classes: number of output classes (default 2, e.g.
            portrait/background segmentation).
        pretrained: accepted for API compatibility with callers; loading of
            Cityscapes-pretrained weights is currently disabled because no
            checkpoint path is bundled, so the flag is ignored.

    Returns:
        An `icnet` module with its final `classification` 1x1 conv replaced
        to produce `num_classes` channels.
    """
    base_model = icnet()
    # NOTE(review): pretrained loading previously pointed at a hard-coded
    # local checkpoint; re-enable here with base_model.load_state_dict(...)
    # when a checkpoint path is available.
    base_model.classification = nn.Conv2d(128, num_classes, 1, 1, 0)
    return base_model
# For testing purposes only: run a forward pass on a dummy input and print
# the sizes of the three cascade outputs (training-mode forward).
if __name__ == '__main__':
    model = icnet_portrait(num_classes=2)
    # 385x385 keeps the spatial size valid through the network's /32 path.
    x = Variable(torch.from_numpy(np.empty((1, 3, 385, 385))).float())
    y = model(x)
    # In training mode the model returns (sub4_cls, sub24_cls, sub124_cls).
    print(y[0].size(), y[1].size(), y[2].size())