"""
FPN for retina in pytorch
"""

import torch
from torch import nn
from torch.nn import functional as F
from torch.autograd import Variable

from config import config

class Bottleneck(nn.Module):
    """
    ResNet bottleneck residual block (1x1 -> 3x3 -> 1x1 convs + skip connection).

    refer to https://github.com/pytorch/vision/blob/5b75a277423dd455df05b9789689c5414b334daf/torchvision/models/resnet.py
    """
    expansion   = 4  # output channels = planes * expansion

    def __init__(self, in_planes, planes, stride=1, downsample=None):
        """
        initialization: define many layers
        Input:  in_planes:  number of input channels
                planes:     bottleneck width; the block outputs planes * expansion channels
                stride:     stride of the middle 3x3 conv (spatial downsampling)
                downsample: optional module that maps x to the residual's shape;
                            required when stride != 1 or the channel count changes
        """
        super(Bottleneck, self).__init__()

        self.conv1  = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1    = nn.BatchNorm2d(planes)

        self.conv2  = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2    = nn.BatchNorm2d(planes)

        self.conv3  = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
        # BUGFIX: was nn.Conv2d(planes * self.expansion), which raises TypeError
        # (Conv2d needs in/out channels and kernel_size); the third conv is
        # followed by a BatchNorm, matching the torchvision reference.
        self.bn3    = nn.BatchNorm2d(planes * self.expansion)

        self.stride = stride
        self.downsample = downsample

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))

        # BUGFIX: the skip connection must always be added. When a downsample
        # module is given it adapts x to the residual's shape; the original code
        # added the raw x exactly when shapes could NOT match, and dropped the
        # residual entirely otherwise.
        if self.downsample is not None:
            identity = self.downsample(x)
        else:
            identity = x
        return F.relu(out + identity)

class fpnNet(nn.Module):
    """
    fpn module

    Feature Pyramid Network backbone: a bottom-up ResNet body plus a top-down
    pathway with lateral connections. forward() returns five 256-channel
    feature maps (p3, p4, p5, p6, p7) at strides 8/16/32/64/128.
    """
    def __init__(self, block, num_blocks):
        """
        initialization of fpnNet
        here not considering classfication, so no classNum
        Input:  block: the block module for resnet (must expose `expansion`)
                num_blocks:     the block number of each block type
        """
        super(fpnNet, self).__init__()
        self.in_planes  = 64

        # pre conv layers (stem: 7x7 stride-2 conv + BN; max-pool is in forward)
        self.conv1      = nn.Conv2d(3, self.in_planes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1        = nn.BatchNorm2d(self.in_planes)

        # bottom-up layers (the four ResNet stages)
        self.layer1     = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2     = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3     = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4     = self._make_layer(block, 512, num_blocks[3], stride=2)
        # extra coarse pyramid levels p6/p7 (RetinaNet-style)
        self.conv6      = nn.Conv2d(512 * block.expansion, 256, kernel_size=3, stride=2, padding=1)
        self.conv7      = nn.Conv2d(256, 256, kernel_size=3, stride=2, padding=1)

        # lateral layers: 1x1 convs mapping c5/c4/c3 to the common 256 channels.
        # Channel counts were hard-coded (1024, 512) which silently assumed
        # expansion == 4; derive them from block.expansion like latlayer1 does.
        self.latlayer1  = nn.Conv2d(512 * block.expansion, 256, kernel_size=1, stride=1, padding=0)
        self.latlayer2  = nn.Conv2d(256 * block.expansion, 256, kernel_size=1, stride=1, padding=0)
        self.latlayer3  = nn.Conv2d(128 * block.expansion, 256, kernel_size=1, stride=1, padding=0)

        # top-down layers: 3x3 convs smoothing the merged maps
        self.toplayer1  = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.toplayer2  = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)

    def _upsample_add(self, x, y):
        """
        Upsample x then plus the y
        Input:  x: top feature map which will be upsampled
                y: lateral feature map which will be plus
        """
        _, _, H, W  = y.size()[0:4]
        # F.upsample is deprecated; F.interpolate is the supported equivalent
        return F.interpolate(x, size=(H, W), mode='bilinear', align_corners=False) + y

    def _make_layer(self, block, planes, num_blocks, stride):
        """
        make block group where block type is the same
        The first block may downsample (stride / channel change); the remaining
        num_blocks - 1 blocks keep shape.
        """
        downsample  = None
        if stride != 1 or self.in_planes != planes * block.expansion:
            downsample  = nn.Sequential(
                            nn.Conv2d(self.in_planes, planes * block.expansion,
                                      kernel_size=1, stride=stride, bias=False),
                            nn.BatchNorm2d(planes * block.expansion))
        # BUGFIX: the original called layers.append() with no arguments, which
        # raises TypeError and never built the stage; build it as in resnet.
        layers  = [block(self.in_planes, planes, stride, downsample)]
        self.in_planes = planes * block.expansion
        for _ in range(1, num_blocks):
            layers.append(block(self.in_planes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        # forward procedure
        # bottom-up
        c1  = F.relu(self.bn1(self.conv1(x)))
        c1  = F.max_pool2d(c1, kernel_size=3, stride=2, padding=1)

        c2  = self.layer1(c1)
        c3  = self.layer2(c2)
        c4  = self.layer3(c3)
        c5  = self.layer4(c4)
        p6  = F.relu(self.conv6(c5))
        p7  = self.conv7(p6)

        # top-down: merge upsampled coarser maps with lateral projections
        p5  = self.latlayer1(c5)
        p4  = self._upsample_add(p5, self.latlayer2(c4))
        p4  = self.toplayer1(p4)
        p3  = self._upsample_add(p4, self.latlayer3(c3))
        p3  = self.toplayer2(p3)

        return p3, p4, p5, p6, p7

def FPN50():
    """Build an FPN on a ResNet-50 bottleneck body (stage depths 3-4-6-3)."""
    stage_depths = [3, 4, 6, 3]
    return fpnNet(Bottleneck, stage_depths)
def FPN101():
    """Build an FPN on a ResNet-101 bottleneck body."""
    # BUGFIX: ResNet-101 stage depths are [3, 4, 23, 3] (He et al., Table 1);
    # the original [2, 4, 23, 3] built a non-standard backbone.
    return fpnNet(Bottleneck, [3, 4, 23, 3])
def test():
    """Smoke test: run FPN50 on one random 600x300 image and print the size of each pyramid level."""
    net     = FPN50()
    inp     = Variable(torch.randn(1, 3, 600, 300))
    if config.useGpu:
        net = net.cuda()
        inp = inp.cuda()
    fms     = net(inp)

    if config.useGpu:
        # BUGFIX: net(...) returns a tuple of tensors; a tuple has no .cpu(),
        # so move each feature map back to the host individually.
        fms = [fm.cpu() for fm in fms]
    for fm in fms:
        print(fm.size())
