import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from layers import *
import os


class RefineDetVGG16(nn.Module):
    """RefineDet object detector on a VGG16-BN backbone.

    Two-stage single-shot detector:

    * ARM (anchor refinement module): binary objectness scores and coarse
      box offsets predicted on four backbone maps (conv4, conv5, fc7, extras).
    * ODM (object detection module): per-class scores and refined box
      offsets predicted on a top-down feature pyramid (p3..p6) built from
      the same four maps via transfer-connection blocks.

    Each location predicts 3 anchors (12 = 3*4 loc channels, 6 = 3*2 ARM
    conf channels, 3*num_classes ODM conf channels).
    """

    def __init__(self, phase, size, num_classes):
        """
        :param phase: 'test' makes ``forward`` return softmax-normalized
            confidences; any other value (e.g. 'train') returns raw logits.
        :param size: input image size; stored but not used in this class.
        :param num_classes: number of ODM classes (presumably including
            background -- confirm against the loss/eval code).
        """
        super(RefineDetVGG16, self).__init__()
        self.phase = phase
        self.num_classes = num_classes
        self.size = size

        def _cbr(cin, cout, k=3, s=1, p=1, d=1):
            # One conv -> batchnorm -> relu triple; unpacked into
            # nn.Sequential so child indices match the original layout.
            return [nn.Conv2d(cin, cout, kernel_size=k, stride=s, padding=p, dilation=d),
                    nn.BatchNorm2d(cout),
                    nn.ReLU()]

        # Scale normalization for the two early, large-magnitude maps.
        # NOTE(review): one L2Norm instance (a single learned scale vector)
        # is shared by the conv4 and conv5 outputs; the reference RefineDet
        # uses a separate L2Norm per map. Kept as-is so existing state
        # dicts still load -- confirm whether sharing is intentional.
        self.L2Norm = L2Norm(512, 20)

        # VGG16-BN backbone; max-pooling between stages happens in forward().
        self.vgg_conv1 = nn.Sequential(*_cbr(3, 64), *_cbr(64, 64))
        self.vgg_conv2 = nn.Sequential(*_cbr(64, 128), *_cbr(128, 128))
        self.vgg_conv3 = nn.Sequential(*_cbr(128, 256), *_cbr(256, 256), *_cbr(256, 256))
        self.vgg_conv4 = nn.Sequential(*_cbr(256, 512), *_cbr(512, 512), *_cbr(512, 512))
        self.vgg_conv5 = nn.Sequential(*_cbr(512, 512), *_cbr(512, 512), *_cbr(512, 512))
        # VGG fc6/fc7 converted to convolutions (fc6 uses dilation 3).
        self.fc6 = nn.Sequential(*_cbr(512, 1024, p=3, d=3))
        self.fc7 = nn.Sequential(*_cbr(1024, 1024, k=1, p=0))
        self.max_pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.relu = nn.ReLU()

        # Extra stride-2 stage producing the smallest ARM source map.
        self.extras = nn.Sequential(*_cbr(1024, 256, k=1, p=0), *_cbr(256, 512, s=2))

        # ARM heads, one conv per source map (conv4, conv5, fc7, extras).
        arm_channels = (512, 512, 1024, 512)
        self.arm_loc = nn.ModuleList(
            [nn.Conv2d(c, 12, kernel_size=3, stride=1, padding=1) for c in arm_channels])
        self.arm_conf = nn.ModuleList(
            [nn.Conv2d(c, 6, kernel_size=3, stride=1, padding=1) for c in arm_channels])

        # ODM heads; every pyramid level carries 256 channels.
        self.odm_loc = nn.ModuleList(
            [nn.Conv2d(256, 12, kernel_size=3, stride=1, padding=1) for _ in range(4)])
        self.odm_conf = nn.ModuleList(
            [nn.Conv2d(256, 3 * num_classes, kernel_size=3, stride=1, padding=1)
             for _ in range(4)])

        def _trans(cin):
            # Transfer-connection block: project an ARM map to 256 channels;
            # its (BN, pre-activation) output is summed with the upsampled
            # coarser pyramid level in forward().
            return nn.Sequential(nn.Conv2d(cin, 256, kernel_size=3, stride=1, padding=1),
                                 nn.BatchNorm2d(256),
                                 nn.ReLU(inplace=True),
                                 nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
                                 nn.BatchNorm2d(256))

        self.trans_layers = nn.ModuleList([_trans(c) for c in (512, 512, 1024)])
        self.up_layers = nn.ModuleList(
            [nn.ConvTranspose2d(256, 256, kernel_size=2, stride=2, padding=0)
             for _ in range(3)])
        # NOTE(review): keeps the original 'latent_layrs' spelling (sic) so
        # previously saved state dicts continue to load.
        self.latent_layrs = nn.ModuleList(
            [nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1) for _ in range(3)])

        # Top pyramid level p6, computed from the extras output.
        self.P6 = nn.Sequential(*_cbr(512, 256), *_cbr(256, 256), *_cbr(256, 256))

        # Fix: always create softmax (it is parameter-free, so the state
        # dict is unchanged). Previously it existed only when constructed
        # with phase='test', so switching self.phase to 'test' afterwards
        # crashed forward() with AttributeError.
        self.softmax = nn.Softmax(dim=-1)

    @staticmethod
    def _apply_heads(sources, heads):
        """Apply one conv head per source map, flatten each result to
        (N, H*W*C) in NHWC order, and concatenate along dim 1."""
        outs = [h(s).permute(0, 2, 3, 1).contiguous() for s, h in zip(sources, heads)]
        return torch.cat([o.view(o.size(0), -1) for o in outs], 1)

    def forward(self, x):
        """Run detection on a batch of images.

        :param x: image batch, NCHW with 3 channels (presumably of spatial
            size self.size -- not validated here).
        :return: tuple ``(arm_loc, arm_conf, odm_loc, odm_conf)`` with
            shapes (N, A, 4), (N, A, 2), (N, A, 4), (N, A, num_classes)
            where A is the total anchor count. In 'test' phase the two
            confidence tensors are softmax-normalized over the last dim.
        """
        # Backbone: collect the four ARM source maps.
        x = self.max_pool(self.vgg_conv1(x))
        x = self.max_pool(self.vgg_conv2(x))
        x = self.max_pool(self.vgg_conv3(x))
        x = self.vgg_conv4(x)
        arm_sources = [self.L2Norm(x)]
        x = self.vgg_conv5(self.max_pool(x))
        arm_sources.append(self.L2Norm(x))
        x = self.fc7(self.fc6(self.max_pool(x)))
        arm_sources.append(x)
        x = self.extras(x)
        arm_sources.append(x)

        # ARM heads: objectness + coarse boxes on the raw source maps.
        arm_loc = self._apply_heads(arm_sources, self.arm_loc)
        arm_conf = self._apply_heads(arm_sources, self.arm_conf)

        # Top-down pyramid p6 -> p3: each level is
        # relu(latent(relu(trans(source) + upsampled_coarser))).
        p6 = self.P6(arm_sources[3])
        pyramid = [p6]
        up = self.up_layers[2](p6)
        for i in (2, 1, 0):
            p = self.relu(self.trans_layers[i](arm_sources[i]) + up)
            p = self.relu(self.latent_layrs[i](p))
            pyramid.insert(0, p)  # keep pyramid ordered [p3, p4, p5, p6]
            if i:
                up = self.up_layers[i - 1](p)

        # ODM heads: class scores + refined boxes on the pyramid.
        odm_loc = self._apply_heads(pyramid, self.odm_loc)
        odm_conf = self._apply_heads(pyramid, self.odm_conf)

        if self.phase == 'test':
            return (arm_loc.view(arm_loc.size(0), -1, 4),
                    self.softmax(arm_conf.view(arm_conf.size(0), -1, 2)),
                    odm_loc.view(odm_loc.size(0), -1, 4),
                    self.softmax(odm_conf.view(odm_conf.size(0), -1, self.num_classes)))
        return (arm_loc.view(arm_loc.size(0), -1, 4),
                arm_conf.view(arm_conf.size(0), -1, 2),
                odm_loc.view(odm_loc.size(0), -1, 4),
                odm_conf.view(odm_conf.size(0), -1, self.num_classes))

