import os
import torch
import torch.nn as nn
import torch.nn.functional as F

from layers import *
from .networks import ConvOffset2d, Bottleneck

class RefineSSD_resnet(nn.Module):
    """RefineDet-style single-shot detector with a ResNet backbone and
    deformable-convolution (ConvOffset2d) ODM heads.

    The ARM branch predicts coarse box offsets from four backbone feature
    maps; those loc predictions are also projected into sampling offsets
    that drive the deformable ODM heads on the FPN-fused feature maps.

    Args:
        size: input image size (320 or 512; see build_net).
        res_layers: number of Bottleneck blocks in each of the 4 ResNet stages.
        num_classes: number of detection classes (default 21).
        phase: 'train' or 'test'; 'test' applies softmax to ODM class scores.
        def_groups: deformable-group count for every ConvOffset2d head.
        multihead: if True, adds a parallel 5x5 deformable head whose output
            is summed with the 3x3 head.
    """

    def __init__(self, size, res_layers, num_classes=21, phase='train', def_groups=1, multihead=False):
        super(RefineSSD_resnet, self).__init__()
        self.num_classes = num_classes
        self.size = size
        self.phase = phase
        self.def_groups = def_groups
        self.multihead = multihead
        num_box = 3  # anchors per feature-map cell

        # SSD network
        # NOTE(review): these L2Norm layers are never used in forward();
        # kept so existing checkpoints still load without key mismatches.
        self.L2Norm_4_3 = L2Norm(512, 10)
        self.L2Norm_5_3 = L2Norm(1024, 8)

        # ResNet stem + four stages built from Bottleneck blocks.
        self.inplanes = 64
        conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        bn1 = nn.BatchNorm2d(64)
        relu = nn.ReLU(inplace=True)
        maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.maxpool = maxpool
        layer1 = self._make_layer(Bottleneck, 64, res_layers[0])
        layer2 = self._make_layer(Bottleneck, 128, res_layers[1], stride=2)
        layer3 = self._make_layer(Bottleneck, 256, res_layers[2], stride=2)
        layer4 = self._make_layer(Bottleneck, 512, res_layers[3], stride=1)

        # NOTE(review): despite the name, `avgpool` is a MaxPool2d (kept as-is).
        avgpool = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        conv5_pre = nn.Conv2d(2048, 1024, kernel_size=1, bias=False)
        bn5_pre = nn.BatchNorm2d(conv5_pre.out_channels)
        conv5 = nn.Conv2d(1024, 1024, kernel_size=3, padding=1, stride=2, bias=False)  # stride=2 downsamples for the last ARM source (8.17)
        bn5 = nn.BatchNorm2d(conv5.out_channels)
        # `relu` is listed twice; it is stateless so sharing one module is safe.
        self.backbone = nn.ModuleList([conv1, bn1, relu, maxpool, layer1, layer2, layer3,
                                       layer4, avgpool, conv5_pre, bn5_pre, relu, conv5, bn5, relu])

        ################################################################################################
        # Transforms the deepest backbone map into the first ODM source.
        self.last_layer_trans = nn.Sequential(nn.Conv2d(1024, 256, kernel_size=3, stride=1, padding=1),
                                              nn.ReLU(inplace=True),
                                              nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
                                              nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1))

        # ARM localization heads, one per source map (channels 256/512/1024/1024).
        self.arm_loc = nn.ModuleList([nn.Conv2d(256, num_box*4, kernel_size=3, stride=1, padding=1),
                                      nn.Conv2d(512, num_box*4, kernel_size=3, stride=1, padding=1),
                                      nn.Conv2d(1024, num_box*4, kernel_size=3, stride=1, padding=1),
                                      nn.Conv2d(1024, num_box*4, kernel_size=3, stride=1, padding=1),
                                      ])
        # 1x1 convs turning ARM loc predictions into 3x3 deformable-conv
        # offsets: def_groups * 2 (dx, dy) * 3 * 3 channels each.
        self.offset = nn.ModuleList(
                [nn.Conv2d(num_box*4, self.def_groups * 2 * 3 * 3, kernel_size=1, stride=1, padding=0),
                 nn.Conv2d(num_box*4, self.def_groups * 2 * 3 * 3, kernel_size=1, stride=1, padding=0),
                 nn.Conv2d(num_box*4, self.def_groups * 2 * 3 * 3, kernel_size=1, stride=1, padding=0),
                 nn.Conv2d(num_box*4, self.def_groups * 2 * 3 * 3, kernel_size=1, stride=1, padding=0),
                 ])

        # ODM deformable heads (3x3): box regression and classification.
        self.odm_loc = nn.ModuleList([ConvOffset2d(256, num_box*4, kernel_size=3, stride=1, padding=1, num_deformable_groups=self.def_groups),
                                      ConvOffset2d(256, num_box*4, kernel_size=3, stride=1, padding=1, num_deformable_groups=self.def_groups),
                                      ConvOffset2d(256, num_box*4, kernel_size=3, stride=1, padding=1, num_deformable_groups=self.def_groups),
                                      ConvOffset2d(256, num_box*4, kernel_size=3, stride=1, padding=1, num_deformable_groups=self.def_groups),
                                      ])
        self.odm_conf = nn.ModuleList([ConvOffset2d(256, num_box*self.num_classes, kernel_size=3, stride=1, padding=1, num_deformable_groups=self.def_groups),
                                       ConvOffset2d(256, num_box*self.num_classes, kernel_size=3, stride=1, padding=1, num_deformable_groups=self.def_groups),
                                       ConvOffset2d(256, num_box*self.num_classes, kernel_size=3, stride=1, padding=1, num_deformable_groups=self.def_groups),
                                       ConvOffset2d(256, num_box*self.num_classes, kernel_size=3, stride=1, padding=1, num_deformable_groups=self.def_groups),
                                       ])
        if self.multihead:
            # Parallel 5x5 deformable heads; offsets need 2*5*5 channels per group.
            self.offset2 = nn.ModuleList(
                [nn.Conv2d(num_box * 4, self.def_groups * 2 * 5 * 5, kernel_size=1, stride=1, padding=0),
                 nn.Conv2d(num_box * 4, self.def_groups * 2 * 5 * 5, kernel_size=1, stride=1, padding=0),
                 nn.Conv2d(num_box * 4, self.def_groups * 2 * 5 * 5, kernel_size=1, stride=1, padding=0),
                 nn.Conv2d(num_box * 4, self.def_groups * 2 * 5 * 5, kernel_size=1, stride=1, padding=0),
                 ])
            self.odm_loc_2 = nn.ModuleList([ConvOffset2d(256, num_box * 4, kernel_size=5, stride=1, padding=2,
                                                         dilation=1, num_deformable_groups=self.def_groups),
                                            ConvOffset2d(256, num_box * 4, kernel_size=5, stride=1, padding=2,
                                                         dilation=1, num_deformable_groups=self.def_groups),
                                            ConvOffset2d(256, num_box * 4, kernel_size=5, stride=1, padding=2,
                                                         dilation=1, num_deformable_groups=self.def_groups),
                                            ConvOffset2d(256, num_box * 4, kernel_size=5, stride=1, padding=2,
                                                         dilation=1, num_deformable_groups=self.def_groups),
                                            ])
            self.odm_conf_2 = nn.ModuleList(
                [ConvOffset2d(256, num_box * self.num_classes, kernel_size=5, stride=1, padding=2, dilation=1,
                              num_deformable_groups=self.def_groups),
                 ConvOffset2d(256, num_box * self.num_classes, kernel_size=5, stride=1, padding=2, dilation=1,
                              num_deformable_groups=self.def_groups),
                 ConvOffset2d(256, num_box * self.num_classes, kernel_size=5, stride=1, padding=2, dilation=1,
                              num_deformable_groups=self.def_groups),
                 ConvOffset2d(256, num_box * self.num_classes, kernel_size=5, stride=1, padding=2, dilation=1,
                              num_deformable_groups=self.def_groups),
                 ])

        # Lateral 256-channel transforms for the first three ARM sources,
        # consumed by the FPN top-down pathway below.
        self.trans_layers = nn.ModuleList([nn.Sequential(nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
                                                         nn.ReLU(inplace=True),
                                                         nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)),
                                           nn.Sequential(nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1),
                                                         nn.ReLU(inplace=True),
                                                         nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)),
                                           nn.Sequential(nn.Conv2d(1024, 256, kernel_size=3, stride=1, padding=1),
                                                         nn.ReLU(inplace=True),
                                                         nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)),
                                           ])
        # 2x upsampling (top-down) and post-merge smoothing convs.
        self.up_layers = nn.ModuleList([nn.ConvTranspose2d(256, 256, kernel_size=2, stride=2, padding=0),
                                        nn.ConvTranspose2d(256, 256, kernel_size=2, stride=2, padding=0),
                                        nn.ConvTranspose2d(256, 256, kernel_size=2, stride=2, padding=0),
                                        ])
        self.latent_layers = nn.ModuleList([nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
                                            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
                                            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
                                           ])

        if self.phase == 'test':
            self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        """Run detection on a batch of images.

        Returns a 4-tuple ``(arm_loc, None, odm_loc, odm_conf)``:
            arm_loc: (batch, num_anchors, 4) coarse box offsets.
            None:    placeholder — the ARM confidence head is disabled here.
            odm_loc: (batch, num_anchors, 4) refined box offsets.
            odm_conf: (batch, num_anchors, num_classes) in 'train' phase, or
                softmaxed (batch*num_anchors, num_classes) in 'test' phase.
        """
        arm_sources = list()
        arm_loc_list = list()
        arm_offset_list = list()
        obm_loc_list = list()
        obm_conf_list = list()
        obm_sources = list()
        if self.multihead:
            arm_offset2_list = list()

        # Stem (conv1/bn1/relu/maxpool) + layer1.
        for i in range(5):
            x = self.backbone[i](x)
        # NOTE(review): maxpool already ran as backbone[3]; this second
        # application looks like deliberate extra downsampling — confirm.
        x = self.maxpool(x)
        arm_sources.append(x)

        # layer2, layer3, then the remaining tail (layer4 .. conv5/bn5/relu),
        # collecting an ARM source map after each stage.
        for i in range(5, 6):
            x = self.backbone[i](x)
        arm_sources.append(x)
        for i in range(6, 7):
            x = self.backbone[i](x)
        arm_sources.append(x)
        for i in range(7, len(self.backbone)):
            x = self.backbone[i](x)
        arm_sources.append(x)

        # ARM loc predictions double as inputs to the offset projections
        # that steer the deformable ODM heads.
        if self.multihead:
            for (a, l, f, f2) in zip(arm_sources, self.arm_loc, self.offset, self.offset2):
                loc_a = l(a)
                arm_loc_list.append(loc_a.permute(0, 2, 3, 1).contiguous())
                arm_offset_list.append(f(loc_a))
                arm_offset2_list.append(f2(loc_a))
        else:
            for (a, l, f) in zip(arm_sources, self.arm_loc, self.offset):
                loc_a = l(a)
                arm_loc_list.append(loc_a.permute(0, 2, 3, 1).contiguous())
                arm_offset_list.append(f(loc_a))
        arm_loc = torch.cat([o.view(o.size(0), -1) for o in arm_loc_list], 1)
        x = self.last_layer_trans(x)
        obm_sources.append(x)

        # get transformed layers
        trans_layer_list = list()
        for (x_t, t) in zip(arm_sources, self.trans_layers):
            trans_layer_list.append(t(x_t))
        # FPN top-down pathway: upsample, add lateral, smooth.
        trans_layer_list.reverse()
        arm_sources.reverse()
        for (t, u, l) in zip(trans_layer_list, self.up_layers, self.latent_layers):
            x = F.relu(l(F.relu(u(x) + t, inplace=True)), inplace=True)
            obm_sources.append(x)
        obm_sources.reverse()
        # ODM heads: deformable convs driven by the ARM-derived offsets; in
        # multihead mode the 3x3 and 5x5 head outputs are summed.
        if self.multihead:
            for (ob, l, l2, c, c2, f, f2) in zip(obm_sources, self.odm_loc, self.odm_loc_2, self.odm_conf,  self.odm_conf_2, arm_offset_list, arm_offset2_list):
                obm_loc_list.append((l(ob, f)  + l2(ob, f2)).permute(0, 2, 3, 1).contiguous())
                obm_conf_list.append((c(ob, f) + c2(ob, f2)).permute(0, 2, 3, 1).contiguous())
        else:
            for (ob, l, c, f) in zip(obm_sources, self.odm_loc, self.odm_conf, arm_offset_list):
                obm_loc_list.append(l(ob, f).permute(0, 2, 3, 1).contiguous())
                obm_conf_list.append(c(ob, f).permute(0, 2, 3, 1).contiguous())
        obm_loc = torch.cat([o.view(o.size(0), -1) for o in obm_loc_list], 1)
        obm_conf = torch.cat([o.view(o.size(0), -1) for o in obm_conf_list], 1)

        # apply multibox head to source layers
        if self.phase == 'test':
            output = (
                arm_loc.view(arm_loc.size(0), -1, 4),  # loc preds
                None, #self.softmax(arm_conf.view(-1, self.num_classes)),  # conf preds
                obm_loc.view(obm_loc.size(0), -1, 4),  # loc preds
                self.softmax(obm_conf.view(-1, self.num_classes)),  # conf preds
            )
        else:
            output = (
                arm_loc.view(arm_loc.size(0), -1, 4),  # loc preds
                None, # arm_conf.view(arm_conf.size(0), -1, self.num_classes),  # conf preds
                obm_loc.view(obm_loc.size(0), -1, 4),  # loc preds
                obm_conf.view(obm_conf.size(0), -1, self.num_classes),  # conf preds
            )

        return output

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one ResNet stage of `blocks` `block` units.

        A 1x1 conv + BN downsample shortcut is attached to the first unit
        when the stride or channel count changes. Updates self.inplanes.
        """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def load_weights(self, base_file):
        """Load a state dict from a .pkl or .pth checkpoint file (CPU-mapped)."""
        other, ext = os.path.splitext(base_file)
        # Bug fix: the original `ext == '.pkl' or '.pth'` was always truthy
        # (the string '.pth' is a truthy operand of `or`), so the extension
        # check never rejected any file.
        if ext in ('.pkl', '.pth'):
            print('Loading weights into state dict...')
            self.load_state_dict(torch.load(base_file, map_location=lambda storage, loc: storage))
            print('Finished!')
        else:
            print('Sorry only .pth and .pkl files supported.')
# Extra-layer channel specs keyed by input size. 'S' marks a stride-2
# (downsampling) position for the builder that consumes this spec.
# NOTE(review): not referenced within this file — presumably consumed by an
# external add_extras-style builder; confirm before removing.
extras = {
    input_size: channel_spec
    for input_size, channel_spec in (
        ('300', [256, 'S', 512, 256, 'S', 512, 256, 512, 256, 512]),
        ('320', [256, 'S', 512, 256, 'S', 512, 256, 512, 256, 512]),
        ('512', [256, 'S', 512, 256, 'S', 512, 256, 'S', 512, 256, 'S', 512]),
    )
}
def build_net(phase, size=320, num_classes=21, def_groups=1, multihead=False, backbone='ResNet18'):
    """Factory for RefineSSD_resnet.

    Maps a backbone name to per-stage block counts and constructs the
    detector. Prints an error message and returns None for unsupported
    sizes or unknown backbones (best-effort behavior, preserved as-is).
    """
    if size not in [320, 512]:
        print("Error: Sorry only SSD320 and SSD512 is supported currently!")
        return

    # Ordered substring lookup; first matching tag wins, same as the
    # original if/elif chain.
    depth_table = (
        ('ResNet18', [2, 2, 2, 2]),
        ('ResNet50', [3, 4, 6, 3]),
        ('ResNet101', [3, 4, 23, 3]),
    )
    res_layers = next((layers for tag, layers in depth_table if tag in backbone), None)
    if res_layers is None:
        print("Error: Unknown model!")
        return

    return RefineSSD_resnet(size, res_layers, num_classes=num_classes, phase=phase,
                            def_groups=def_groups, multihead=multihead)
