"""Dual Path Networks in PyTorch."""
import torch
import torch.nn as nn
import torch.nn.functional as F


class Bottleneck(nn.Module):
    """DPN bottleneck block: a grouped 3x3 bottleneck whose output is split
    into a residual part (summed with the shortcut) and a densely connected
    part (concatenated onto it)."""

    def __init__(self, last_planes, in_planes, out_planes, dense_depth, stride, first_layer, deconv=None):
        super(Bottleneck, self).__init__()
        self.out_planes = out_planes
        self.dense_depth = dense_depth

        # Identity shortcut by default; replaced by a projection below when
        # this is the first block of a stage.
        self.shortcut = nn.Sequential()

        if not deconv:
            # Standard path: convolutions followed by batch norm.
            self.deconv = False
            self.conv1 = nn.Conv2d(last_planes, in_planes, kernel_size=1, bias=False)
            self.bn1 = nn.BatchNorm2d(in_planes)
            self.conv2 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride, padding=1, groups=32, bias=False)
            self.bn2 = nn.BatchNorm2d(in_planes)
            self.conv3 = nn.Conv2d(in_planes, out_planes+dense_depth, kernel_size=1, bias=False)
            self.bn3 = nn.BatchNorm2d(out_planes+dense_depth)

            if first_layer:
                self.shortcut = nn.Sequential(
                    nn.Conv2d(last_planes, out_planes+dense_depth, kernel_size=1, stride=stride, bias=False),
                    nn.BatchNorm2d(out_planes+dense_depth)
                )

        else:
            # Deconvolution path: no batch norm layers are used.
            self.deconv = True
            self.conv1 = deconv(last_planes, in_planes, kernel_size=1, bias=True)
            self.conv2 = deconv(in_planes, in_planes, kernel_size=3, stride=stride, padding=1, groups=32, bias=True)
            self.conv3 = deconv(in_planes, out_planes+dense_depth, kernel_size=1, bias=True)

            if first_layer:
                self.shortcut = deconv(last_planes, out_planes+dense_depth, kernel_size=1, stride=stride, bias=True)

    def forward(self, x):
        if self.deconv:
            out = F.relu(self.conv1(x))
            out = F.relu(self.conv2(out))
            out = self.conv3(out)
        else:
            out = F.relu(self.bn1(self.conv1(x)))
            out = F.relu(self.bn2(self.conv2(out)))
            out = self.bn3(self.conv3(out))
        x = self.shortcut(x)
        d = self.out_planes
        # Dual path merge: the first d channels of both tensors are summed
        # (residual path); the trailing dense_depth channels of each are kept
        # and concatenated (dense path).
        out = torch.cat([x[:, :d, :, :] + out[:, :d, :, :], x[:, d:, :, :], out[:, d:, :, :]], 1)
        out = F.relu(out)
        return out
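

# A minimal shape check for the dual-path merge above (an illustrative sketch,
# not part of the original model). With out_planes=256 and dense_depth=16 the
# shortcut and main branches each emit 256+16 channels; the first 256 are
# summed and both 16-channel tails are concatenated, giving 256 + 2*16 = 288
# output channels.
def _demo_dual_path_merge():
    block = Bottleneck(last_planes=64, in_planes=96, out_planes=256,
                       dense_depth=16, stride=1, first_layer=True)
    y = block(torch.randn(2, 64, 32, 32))
    assert y.shape == (2, 256 + 2 * 16, 32, 32)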


class DPN(nn.Module):
    def __init__(self, cfg, num_classes=10, deconv=None, delinear=None, channel_deconv=None):
        super(DPN, self).__init__()
        in_planes, out_planes = cfg['in_planes'], cfg['out_planes']
        num_blocks, dense_depth = cfg['num_blocks'], cfg['dense_depth']

        if deconv is None:
            self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
            self.bn1 = nn.BatchNorm2d(64)
        else:
            # Deconvolution stem in place of conv+BN; `freeze` and `n_iter`
            # are options of the supplied deconv layer.
            self.conv1 = deconv(3, 64, kernel_size=3, stride=1, padding=1, bias=True, freeze=True, n_iter=10)

        # Optional channel-wise deconvolution applied to the final features.
        if channel_deconv:
            self.deconv1 = channel_deconv()

        self.last_planes = 64
        self.layer1 = self._make_layer(in_planes[0], out_planes[0], num_blocks[0], dense_depth[0], stride=1, deconv=deconv)
        self.layer2 = self._make_layer(in_planes[1], out_planes[1], num_blocks[1], dense_depth[1], stride=2, deconv=deconv)
        self.layer3 = self._make_layer(in_planes[2], out_planes[2], num_blocks[2], dense_depth[2], stride=2, deconv=deconv)
        self.layer4 = self._make_layer(in_planes[3], out_planes[3], num_blocks[3], dense_depth[3], stride=2, deconv=deconv)
        # Final channel count: out_planes[3] + (num_blocks[3]+1)*dense_depth[3].
        if delinear:
            self.linear = delinear(out_planes[3]+(num_blocks[3]+1)*dense_depth[3], num_classes)
        else:
            self.linear = nn.Linear(out_planes[3]+(num_blocks[3]+1)*dense_depth[3], num_classes)

    def _make_layer(self, in_planes, out_planes, num_blocks, dense_depth, stride, deconv):
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for i, stride in enumerate(strides):
            layers.append(Bottleneck(self.last_planes, in_planes, out_planes, dense_depth, stride, i==0, deconv))
            # The dense path adds dense_depth channels per block; the first
            # block of a stage contributes 2*dense_depth (both tails).
            self.last_planes = out_planes + (i+2) * dense_depth
        return nn.Sequential(*layers)

    def forward(self, x):
        if hasattr(self, 'bn1'):
            out = F.relu(self.bn1(self.conv1(x)))
        else:
            out = F.relu(self.conv1(x))

        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        if hasattr(self, 'deconv1'):
            out = self.deconv1(out)
        # 32x32 inputs reach this point as 4x4 feature maps (three stride-2 stages).
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
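

# The `deconv` argument can be any callable with an nn.Conv2d-like signature;
# the stem additionally passes `freeze` and `n_iter`. This stand-in (a
# hypothetical sketch, not the project's real deconvolution layer) satisfies
# that interface with a plain convolution, which is enough to exercise the
# BN-free code path.
def _plain_conv_as_deconv(in_channels, out_channels, freeze=False, n_iter=0, **kwargs):
    # `freeze` and `n_iter` are accepted for interface compatibility only.
    return nn.Conv2d(in_channels, out_channels, **kwargs)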


def DPN26(num_classes=10, deconv=None, delinear=None, channel_deconv=None):
    cfg = {
        'in_planes': (96, 192, 384, 768),
        'out_planes': (256, 512, 1024, 2048),
        'num_blocks': (2, 2, 2, 2),
        'dense_depth': (16, 32, 24, 128)
    }
    return DPN(cfg, num_classes, deconv, delinear, channel_deconv)

def DPN92(num_classes=10, deconv=None, delinear=None, channel_deconv=None):
    cfg = {
        'in_planes': (96, 192, 384, 768),
        'out_planes': (256, 512, 1024, 2048),
        'num_blocks': (3, 4, 20, 3),
        'dense_depth': (16, 32, 24, 128)
    }
    return DPN(cfg, num_classes, deconv, delinear, channel_deconv)


def test():
    net = DPN92()
    x = torch.randn(1, 3, 32, 32)
    y = net(x)
    print(y)
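

# Exercises the deconvolution code path using the hypothetical stand-in layer
# defined above; an illustrative sketch, not part of the original file.
def test_deconv():
    net = DPN26(deconv=_plain_conv_as_deconv)
    y = net(torch.randn(1, 3, 32, 32))
    print(y.size())
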

if __name__ == '__main__':
    test()
