import torch
import torch.nn.functional as F
from torch import nn
from torchvision import models



def initialize_weights(*models):
    """Initialize the weights of one or more modules in place.

    Conv2d/Linear weights get Kaiming-normal initialization with zeroed
    biases; BatchNorm2d layers get unit weight and zero bias.
    """
    for model in models:
        for module in model.modules():
            if isinstance(module, (nn.Conv2d, nn.Linear)):
                # kaiming_normal was renamed to the in-place kaiming_normal_
                # in torch 0.4; the old name is removed in modern releases.
                nn.init.kaiming_normal_(module.weight)
                if module.bias is not None:
                    module.bias.data.zero_()
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()

# many are borrowed from https://github.com/ycszen/pytorch-ss/blob/master/gcn.py
class _GlobalConvModule(nn.Module):
    def __init__(self, in_dim, out_dim, kernel_size):
        super(_GlobalConvModule, self).__init__()
        pad0 = (kernel_size[0] - 1) / 2
        pad1 = (kernel_size[1] - 1) / 2
        # kernel size had better be odd number so as to avoid alignment error
        super(_GlobalConvModule, self).__init__()
        self.conv_l1 = nn.Conv2d(in_dim, out_dim, kernel_size=(kernel_size[0], 1),
                                 padding=(pad0, 0))
        self.conv_l2 = nn.Conv2d(out_dim, out_dim, kernel_size=(1, kernel_size[1]),
                                 padding=(0, pad1))
        self.conv_r1 = nn.Conv2d(in_dim, out_dim, kernel_size=(1, kernel_size[1]),
                                 padding=(0, pad1))
        self.conv_r2 = nn.Conv2d(out_dim, out_dim, kernel_size=(kernel_size[0], 1),
                                 padding=(pad0, 0))

    def forward(self, x):
        x_l = self.conv_l1(x)
        x_l = self.conv_l2(x_l)
        x_r = self.conv_r1(x)
        x_r = self.conv_r2(x_r)
        x = x_l + x_r
        return x


class _BoundaryRefineModule(nn.Module):
    def __init__(self, dim):
        super(_BoundaryRefineModule, self).__init__()
        self.relu = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(dim, dim, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(dim, dim, kernel_size=3, padding=1)

    def forward(self, x):
        residual = self.conv1(x)
        residual = self.relu(residual)
        residual = self.conv2(residual)
        out = x + residual
        return out

class _BoundaryRefineModule2(nn.Module):
    def __init__(self, indim,outdim):
        super(_BoundaryRefineModule2, self).__init__()
        self.relu = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(indim, outdim, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(outdim, outdim, kernel_size=3, padding=1)
        self.shortcut = nn.Conv2d(indim, outdim, kernel_size=3, padding=1)

    def forward(self, x):
        residual = self.conv1(x)
        residual = self.relu(residual)
        residual = self.conv2(residual)
        out = self.shortcut(x) + residual
        return out

class Bottleneck(nn.Module):
    """ResNet-style bottleneck: 1x1 reduce -> 3x3 (strided) -> 1x1 expand,
    added to an identity (or `downsample`-projected) skip, then ReLU.
    """

    squeeze = 8  # channel reduction factor for the inner 3x3 conv

    def __init__(self, inplanes, outplanes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        width = outplanes // self.squeeze
        self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(width)
        self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(width)
        self.conv3 = nn.Conv2d(width, outplanes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(outplanes)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))

        # project the skip only when shape changes (caller supplies downsample)
        skip = x if self.downsample is None else self.downsample(x)
        out = out + skip
        return self.relu(out)

class Bottleneck_up(nn.Module):
    """Decoder bottleneck: 1x1 reduce -> transposed 3x3 (strided) -> 1x1
    expand, added to an identity (or `upsample`-projected) skip, then ReLU.
    `squeeze` controls the inner channel reduction.
    """

    def __init__(self, inplanes, outplanes, stride=1, upsample=None, squeeze = 8):
        super(Bottleneck_up, self).__init__()
        width = inplanes // squeeze
        self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(width)
        self.conv2 = nn.ConvTranspose2d(width, width, kernel_size=3, stride=stride,
                                        padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(width)
        self.conv3 = nn.Conv2d(width, outplanes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(outplanes)
        self.relu = nn.ReLU(inplace=True)
        self.upsample = upsample
        self.stride = stride

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))

        # project the skip only when shape changes (caller supplies upsample)
        skip = x if self.upsample is None else self.upsample(x)
        out = out + skip
        return self.relu(out)

class _EncoderBlock(nn.Module):
    """A stack of `num_block` Bottleneck units; only the first unit may
    stride and/or change the channel count (via a projection shortcut).
    """

    def __init__(self, in_channels, out_channels, num_block, stride):
        super(_EncoderBlock, self).__init__()
        self.inplanes = in_channels
        self.layer1 = self._make_layer(Bottleneck, out_channels, num_block, stride)

    def forward(self, x):
        return self.layer1(x)

    def _make_layer(self, block, planes, blocks, stride=1):
        # 1x1 projection shortcut when spatial size or channels change
        downsample = None
        if stride != 1 or self.inplanes != planes:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes),
            )

        units = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes
        units.extend(block(self.inplanes, planes) for _ in range(blocks - 1))
        return nn.Sequential(*units)

class _DecoderBlock(nn.Module):
    """A stack of `num_block` Bottleneck_up units; the first units keep the
    shape, and the final unit strides up and projects channels via a
    transposed-conv shortcut.
    """

    def __init__(self, in_channels, out_channels, num_block, stride,squeeze=8):
        super(_DecoderBlock, self).__init__()
        self.inplanes = in_channels
        self.layer1 = self._make_layer(Bottleneck_up, out_channels, num_block,stride,squeeze)

    def forward(self, x):
        return self.layer1(x)

    def _make_layer(self, block, planes, blocks, stride=1,squeeze=8):
        # projection shortcut for the final, upsampling unit
        upsample = nn.Sequential(
            nn.ConvTranspose2d(self.inplanes, planes, kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm2d(planes),
        )

        units = [block(self.inplanes, self.inplanes, squeeze=squeeze)
                 for _ in range(blocks - 1)]
        units.append(block(self.inplanes, planes, stride, upsample, squeeze=squeeze))
        return nn.Sequential(*units)



class FigSeg(nn.Module):
    '''
    GCN-style segmentation network (cf. "Large Kernel Matters").

    A ResNet-like encoder (enc0..enc4) feeds per-stage Global Convolution
    modules that map features to num_classes channels; those maps are
    fused top-down with boundary-refine modules, then a small decoder
    restores full resolution. The "0.892" below is presumably a reported
    benchmark score for this variant -- TODO confirm metric/dataset.
    0.892
    '''
    def __init__(self, num_classes):
        super(FigSeg, self).__init__()
        # Stem: 7x7 stride-2 conv -> 64 channels at 1/2 resolution.
        self.enc0 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                      bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
        )
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Channel counts / scales (scale is relative to the input image;
        # enc1 runs after the stride-2 maxpool in layer1):
        self.enc1 = _EncoderBlock(64, 128,num_block=3, stride=1)  # 128 ch, 1/4
        self.enc2 = _EncoderBlock(128, 256, num_block=4, stride=2) # 256 ch, 1/8
        self.enc3 = _EncoderBlock(256, 512,num_block=6, stride=2)  # 512 ch, 1/16
        self.enc4 = _EncoderBlock(512, 1024,num_block=3, stride=2)  # 1024 ch, 1/32

        # Aliases used by forward(); layer1 bundles the maxpool with enc1.
        self.layer0 = self.enc0
        self.layer1 = nn.Sequential(self.maxpool,self.enc1)
        self.layer2 = self.enc2
        self.layer3 = self.enc3
        self.layer4 = self.enc4

        # One GCM per encoder stage, each projecting to num_classes channels.
        self.gcm1 = _GlobalConvModule(1024, num_classes, (7, 7))
        self.gcm2 = _GlobalConvModule(512, num_classes, (7, 7))
        self.gcm3 = _GlobalConvModule(256, num_classes, (7, 7))
        self.gcm4 = _GlobalConvModule(128, num_classes, (7, 7))

        # brm1..brm4 refine each GCM output; brm5..brm8 fuse the
        # concatenated (upsampled, skip) pairs in the top-down path.
        self.brm1 = _BoundaryRefineModule(num_classes)
        self.brm2 = _BoundaryRefineModule(num_classes)
        self.brm3 = _BoundaryRefineModule(num_classes)
        self.brm4 = _BoundaryRefineModule(num_classes)
        self.brm5 = _BoundaryRefineModule2(2*num_classes,num_classes)
        self.brm6 = _BoundaryRefineModule2(2*num_classes,num_classes)
        self.brm7 = _BoundaryRefineModule2(2*num_classes,num_classes)
        self.brm8 = _BoundaryRefineModule2(64+num_classes,16)
        self.brm9 = _BoundaryRefineModule(num_classes)
        # Decoder: 16 -> num_classes channels, stride-2 upsample (1/2 -> 1/1).
        self.dec = _DecoderBlock(16, num_classes, num_block=3, stride=2,squeeze=1)  # 1 de3 cat en1

        # Only the segmentation heads are (re-)initialized here; the encoder
        # keeps its default layer initialization.
        initialize_weights(self.gcm1, self.gcm2, self.gcm3, self.gcm4, self.brm1, self.brm2, self.brm3,
                           self.brm4, self.brm5, self.brm6, self.brm7, self.brm8, self.brm9)

    def forward(self, x):
        # NOTE(review): H, W are computed but never used below.
        H,W = x.size()[2:]
        # if x: 512

        fm0 = self.layer0(x)  # 64 ch, 1/2

        fm1 = self.layer1(fm0)  # 128 ch, 1/4

        fm2 = self.layer2(fm1)  # 256 ch, 1/8

        fm3 = self.layer3(fm2)  # 512 ch, 1/16

        fm4 = self.layer4(fm3)  # 1024 ch, 1/32

        # Per-stage class maps (all num_classes channels), deepest first.
        gcfm1 = self.brm1(self.gcm1(fm4))  # 1/32

        gcfm2 = self.brm2(self.gcm2(fm3))  # 1/16
        gcfm3 = self.brm3(self.gcm3(fm2))  # 1/8

        gcfm4 = self.brm4(self.gcm4(fm1))  # 1/4

        # Top-down fusion: upsample the coarser map to the skip's size,
        # concatenate on channels, and refine.
        # NOTE(review): F.upsample is deprecated in modern torch in favor of
        # F.interpolate (whose bilinear align_corners default differs).
        fs1 = self.brm5(torch.cat([F.upsample(gcfm1, fm3.size()[2:],mode='bilinear') , gcfm2],dim=1))  # 1/16
        fs2 = self.brm6(torch.cat([F.upsample(fs1, fm2.size()[2:],mode='bilinear') , gcfm3],dim=1))  # 1/8
        fs3 = self.brm7(torch.cat([F.upsample(fs2, fm1.size()[2:],mode='bilinear') , gcfm4],dim=1))  # 1/4
        fs4 = self.brm8(torch.cat([F.upsample(fs3, fm0.size()[2:],mode='bilinear'), fm0],dim=1))  # 16 ch, 1/2
        fs4 = self.dec(fs4)  # num_classes ch, full resolution
        out = self.brm9(fs4)  # final refinement, num_classes ch
        return out

if __name__ == '__main__':
    # Quick architecture smoke test: build a torchvision ResNet-50 and
    # print its layer structure. (The original `print model` Python 2
    # statement is a SyntaxError on Python 3.)
    from torchvision.models import resnet50
    model = resnet50()
    print(model)
    # To smoke-test FigSeg instead:
    # model = FigSeg(2)
    # torch.save(model.state_dict(), './testnew.pth')
    # x = torch.randn(10, 3, 513, 513)
    # y = model(x)
    # print(y.size())