import torch
import torch.nn as nn
import torch.nn.functional as F

import os
import sys
wdir = os.path.abspath('.')
sys.path.append(wdir)
from layers import *
from data import prior_box_config



PB_CONFIG = prior_box_config

base = {
    '320': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',
            512, 512, 512],
    '512': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',
            512, 512, 512],
}
'''
0:Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
1:ReLU(inplace=True)
2:Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
3:ReLU(inplace=True)
4:MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
5:Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
6:ReLU(inplace=True)
7:Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
8:ReLU(inplace=True)
9:MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
10:Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
11:ReLU(inplace=True)
12:Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
13:ReLU(inplace=True)
14:Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
15:ReLU(inplace=True)
16:MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=True)
17:Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
18:ReLU(inplace=True)
19:Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
20:ReLU(inplace=True)
21:Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
22:ReLU(inplace=True)
23:MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
24:Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
25:ReLU(inplace=True)
26:Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
27:ReLU(inplace=True)
28:Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
29:ReLU(inplace=True)
30:MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
31:Conv2d(512, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(3, 3), dilation=(3, 3))
32:ReLU(inplace=True)
33:Conv2d(1024, 1024, kernel_size=(1, 1), stride=(1, 1))
34:ReLU(inplace=True)
'''
def vgg(cfg, in_ch, batch_norm=False):
    """Build the VGG backbone as a flat list of nn.Module layers.

    Args:
        cfg: list mixing conv output-channel counts with 'M' (max-pool)
            and 'C' (ceil-mode max-pool) markers.
        in_ch: number of input channels (3 for RGB images).
        batch_norm: if True, insert a BatchNorm2d after every conv.

    Returns:
        The layer list, terminated by pool5 and the dilated conv6/conv7
        pair that replaces VGG's fully connected layers.
    """
    layers = []
    channels = in_ch
    for item in cfg:
        if item == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
        elif item == 'C':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True))
        else:
            layers.append(nn.Conv2d(channels, item, kernel_size=3, padding=1))
            if batch_norm:
                layers.append(nn.BatchNorm2d(item))
            layers.append(nn.ReLU(inplace=True))
            channels = item

    # fc6/fc7 of the original VGG are replaced by dilated convolutions
    # (the dilation/padding of conv6 is a tunable choice).
    layers.extend([
        nn.MaxPool2d(kernel_size=2, stride=2, padding=0),                       # pool5
        nn.Conv2d(channels, 1024, kernel_size=3, padding=3, dilation=3),        # conv6
        nn.ReLU(inplace=True),
        nn.Conv2d(1024, 1024, kernel_size=1),                                   # conv7
        nn.ReLU(inplace=True),
    ])
    return layers

# Extra feature-scaling layers appended after the backbone:
# a conv to 256 channels, then ('S' marker) a stride-2 conv to 512.
extras = {
    size: [256, 'S', 512]
    for size in ('320', '512')
}
'''
0:Conv2d(3, 256, kernel_size=(1, 1), stride=(1, 1))
1:Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
'''
def add_extras(cfg, in_ch, batch_norm=False):
    """Build the extra down-sampling layers appended to the backbone.

    cfg alternates output channel counts; an 'S' entry means "insert a
    stride-2 / padding-1 conv here" whose output channels are the entry
    following the 'S'.  Kernel sizes alternate 1x1 / 3x3 via `use_3x3`.

    Args:
        cfg: channel/'S' configuration list.
        in_ch: channel count produced by the backbone (1024 here).
        batch_norm: accepted for signature parity; not used.

    Returns:
        A list of nn.Conv2d layers.
    """
    layers = []
    channels = in_ch
    use_3x3 = False
    for idx, entry in enumerate(cfg):
        if channels != 'S':
            kernel = 3 if use_3x3 else 1
            if entry == 'S':
                # down-sampling conv; output channels come from the next entry
                layers.append(nn.Conv2d(channels, cfg[idx + 1], kernel_size=kernel,
                                        stride=2, padding=1))
            else:
                layers.append(nn.Conv2d(channels, entry, kernel_size=kernel))
            use_3x3 = not use_3x3
        channels = entry
    return layers

# Number of anchor boxes predicted per feature-map location,
# one entry per source feature map (conv4_3, conv5_3, conv7, extra).
mbox = {
    size: [3, 3, 3, 3]
    for size in ('320', '512')
}

def arm_multibox(vgg, extra_layers, cfg):
    """Build the Anchor Refinement Module (ARM) prediction heads.

    The ARM taps features from three backbone layers (indices in
    `vgg_source`) and from every second extra layer, producing per source:
      - a location head predicting cfg[k] * 4 coarse box offsets, and
      - a confidence head predicting cfg[k] * 2 objectness scores
        (object vs. not-object).

    Returns:
        (arm_loc_layers, arm_conf_layers) as plain lists of nn.Conv2d.
    """
    loc_layers = []
    conf_layers = []
    # conv4_3, conv5_3 and conv7 outputs of the backbone
    vgg_source = [21, 28, -2]
    for k, idx in enumerate(vgg_source):
        src_channels = vgg[idx].out_channels
        loc_layers.append(nn.Conv2d(src_channels, cfg[k] * 4,
                                    kernel_size=3, padding=1))
        conf_layers.append(nn.Conv2d(src_channels, cfg[k] * 2,
                                     kernel_size=3, padding=1))
    # every second extra layer feeds the ARM; cfg indexing continues at 3
    for k, layer in enumerate(extra_layers[1::2], start=3):
        loc_layers.append(nn.Conv2d(layer.out_channels, cfg[k] * 4,
                                    kernel_size=3, padding=1))
        conf_layers.append(nn.Conv2d(layer.out_channels, cfg[k] * 2,
                                     kernel_size=3, padding=1))
    return (loc_layers, conf_layers)

def odm_multibox(arm_layers, cfg, num_classes):
    """Build the Object Detection Module (ODM) prediction heads.

    One loc/conf head pair is created per ARM source; every ODM head
    takes a 256-channel TCB feature map as input.

    Args:
        arm_layers: the (arm_loc_layers, arm_conf_layers) tuple returned
            by arm_multibox; only the number of loc heads is used here.
        cfg: anchors per location for each source feature map.
        num_classes: number of object classes (including background).

    Returns:
        (odm_loc_layers, odm_conf_layers) as plain lists of nn.Conv2d.
    """
    # Only the head count matters; the original also unpacked the
    # (unused) arm_conf_layers.
    num_sources = len(arm_layers[0])
    odm_loc_layers = []
    odm_conf_layers = []
    for k in range(num_sources):
        # TCB outputs always have 256 channels
        odm_loc_layers.append(nn.Conv2d(256, cfg[k] * 4,
                                        kernel_size=3, padding=1))
        odm_conf_layers.append(nn.Conv2d(256, cfg[k] * num_classes,
                                         kernel_size=3, padding=1))
    return (odm_loc_layers, odm_conf_layers)


# Input channel counts of the four feature maps fed to the TCB blocks
# (conv4_3, conv5_3, conv7, extra), read left-to-right as in Fig. 1
# of the RefineDet paper.
tcb = {
    size: [512, 512, 1024, 512]
    for size in ('320', '512')
}

def add_tcb(cfg):
    """Build the Transfer Connection Block (TCB) layers.

    For each source feature map (input channel counts given by cfg,
    read left-to-right as in Fig. 1 of the RefineDet paper) this creates:
      - scale layers: conv / ReLU / conv reducing the source to 256 ch,
      - pred layers: ReLU / conv / ReLU producing the fused output,
      - upsample layers: a 2x transposed conv for every level except
        the deepest one (used to merge the next-deeper TCB output).

    Returns:
        (feature_scale_layers, feature_upsample_layers, feature_pred_layers)
    """
    scale_layers = []
    upsample_layers = []
    pred_layers = []
    last = len(cfg) - 1
    for idx, channels in enumerate(cfg):
        scale_layers += [
            nn.Conv2d(channels, 256, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, 3, padding=1),
        ]
        pred_layers += [
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, 3, padding=1),
            nn.ReLU(inplace=True),
        ]
        # the deepest level has no deeper neighbour to upsample from
        if idx != last:
            upsample_layers.append(nn.ConvTranspose2d(256, 256, 2, 2))

    return (scale_layers, upsample_layers, pred_layers)
    
class RefineDet(nn.Module):
    """RefineDet single-shot detector.

    Composed of a VGG backbone, extra down-sampling layers, the Anchor
    Refinement Module (ARM), Transfer Connection Blocks (TCB) and the
    Object Detection Module (ODM).

    Args:
        phase: 'train' or 'test'; 'test' adds softmax + detection decoding.
        size: input image size (possibly redundant, kept for reference).
        base: backbone layer list produced by vgg().
        extras: extra layer list produced by add_extras().
        ARM: (arm_loc, arm_conf) layer lists from arm_multibox().
        ODM: (odm_loc, odm_conf) layer lists from odm_multibox().
        TCB: (scale, upsample, pred) layer lists from add_tcb().
        num_classes: number of classes including background.
        use_refine: when False the ARM heads are not registered.
            NOTE(review): forward() still references self.arm_loc and
            self.arm_conf unconditionally, so use_refine=False would
            raise AttributeError — confirm intended usage.
    """

    def __init__(self, phase, size, base, extras, ARM, ODM, TCB, num_classes, use_refine=True):
        super(RefineDet, self).__init__()
        self.phase = phase
        self.num_classes = num_classes
        self.cfg = PB_CONFIG
        self.priorbox = PriorBox(self.cfg)
        # Densely sampled prior (anchor) boxes; no grad needed.
        with torch.no_grad():
            self.priors = self.priorbox.forward()
        self.size = size    # possibly redundant

        # VGG backbone.
        self.vgg = nn.ModuleList(base)

        # L2 normalization applied to intermediate conv outputs; the
        # per-channel scale is a learnable parameter.
        self.conv4_3_L2Norm = L2Norm(512, 10)
        self.conv5_3_L2Norm = L2Norm(512, 8)

        # Remaining layers of the network.
        self.extras = nn.ModuleList(extras)
        if use_refine:
            self.arm_loc = nn.ModuleList(ARM[0])
            self.arm_conf = nn.ModuleList(ARM[1])
        self.odm_loc = nn.ModuleList(ODM[0])
        self.odm_conf = nn.ModuleList(ODM[1])

        self.tcb_fscale = nn.ModuleList(TCB[0])
        self.tcb_fupsample = nn.ModuleList(TCB[1])
        self.tcb_fpred = nn.ModuleList(TCB[2])

        if phase == 'test':
            self.softmax = nn.Softmax(dim=-1)
            self.detect = Detection_RefineDet(num_classes, top_k=1000, conf_thresh=0.01,
                                                nms_thresh=0.45, objectness_thresh=0.01,
                                                keep_top_k=500, cfg=self.cfg)

    def forward(self, x):
        """Applies network layers and ops on input image(s) x.

        Args:
            x: input image or batch of images. Shape: [batch,3,width,height].

        Return:
            Depending on phase:
            test:
                Variable(tensor) of output class label predictions,
                confidence score, and corresponding location predictions for
                each object detected. Shape: [batch,topk,7]

            train:
                list of concat outputs from:
                    1: confidence layers, Shape: [batch*num_priors,num_classes]
                    2: localization layers, Shape: [batch,num_priors*4]
                    3: priorbox layers, Shape: [2,num_priors*4]
        """
        sources = []
        tcb_sources = []
        arm_loc = []
        arm_conf = []
        odm_loc = []
        odm_conf = []

        # Apply vgg up to conv4_3 relu and conv5_3 relu.
        # NOTE(review): the hard-coded indices 22/29/30 assume the backbone
        # was built with batch_norm=False — confirm before enabling BN.
        for k in range(30):
            x = self.vgg[k](x)
            if k == 22:
                s = self.conv4_3_L2Norm(x)  # after 3 rounds of max-pooling
                sources.append(s)
            elif k == 29:
                s = self.conv5_3_L2Norm(x)  # after 4 rounds of max-pooling
                sources.append(s)

        # Run the data through the tail of the backbone (pool5/conv6/conv7).
        for k in range(30, len(self.vgg)):
            x = self.vgg[k](x)
        sources.append(x)

        # Apply extra layers and cache source layer outputs.
        for k, v in enumerate(self.extras):
            x = F.relu(v(x), inplace=True)
            if k % 2 == 1:
                sources.append(x)

        # Apply ARM heads to the source layers.
        for (x, l, c) in zip(sources, self.arm_loc, self.arm_conf):
            # [batch, H, W, num_box_per_position(3) * loc(4)]
            arm_loc.append(l(x).permute(0, 2, 3, 1).contiguous())
            # [batch, H, W, num_box_per_position(3) * conf(2)]
            arm_conf.append(c(x).permute(0, 2, 3, 1).contiguous())
        # Flatten each source to [batch, H*W*3*4] and concatenate.
        arm_loc = torch.cat([o.view(o.size(0), -1) for o in arm_loc], 1)
        # Flatten each source to [batch, H*W*3*2] and concatenate.
        arm_conf = torch.cat([o.view(o.size(0), -1) for o in arm_conf], 1)

        # Calculate TCB features, propagating from the deepest source
        # back towards the shallowest.
        p = None
        for k, v in enumerate(sources[::-1]):
            s = v
            # Reduce the ARM source feature to 256 channels.
            for i in range(3):
                s = self.tcb_fscale[(3-k)*3 + i](s)
            if k != 0:   # the deepest level has nothing to upsample from
                u = p
                # Upsample the previous (deeper) TCB output ...
                u = self.tcb_fupsample[3-k](u)
                # ... and fuse it with the current ARM feature.
                s += u
            for i in range(3):
                s = self.tcb_fpred[(3-k)*3 + i](s)
            # Remember the fused feature for the next (shallower) level.
            p = s
            tcb_sources.append(s)

        tcb_sources.reverse()

        # Apply ODM heads to the TCB feature maps.
        for (x, l, c) in zip(tcb_sources, self.odm_loc, self.odm_conf):
            odm_loc.append(l(x).permute(0, 2, 3, 1).contiguous())
            odm_conf.append(c(x).permute(0, 2, 3, 1).contiguous())
        odm_loc = torch.cat([o.view(o.size(0), -1) for o in odm_loc], 1)
        odm_conf = torch.cat([o.view(o.size(0), -1) for o in odm_conf], 1)
        if self.phase == 'test':
            output = self.detect.forward(
                arm_loc.view(arm_loc.size(0), -1, 4),
                self.softmax(arm_conf.view(arm_conf.size(0), -1, 2)),
                odm_loc.view(odm_loc.size(0), -1, 4),
                self.softmax(odm_conf.view(odm_conf.size(0), -1, self.num_classes)),
                self.priors.cuda()
            )
        else:
            # The loss function applies softmax itself; none is needed here.
            output = (
                arm_loc.view(arm_loc.size(0), -1, 4),
                arm_conf.view(arm_conf.size(0), -1, 2),
                odm_loc.view(odm_loc.size(0), -1, 4),
                odm_conf.view(odm_conf.size(0), -1, self.num_classes),
                self.priors
            )

        return output

    def load_weights(self, base_file):
        """Load a state dict from a .pkl or .pth checkpoint file."""
        others, ext = os.path.splitext(base_file)
        # Bug fix: the original tested `ext == '.pkl' or '.pth'`, which is
        # always true because the bare string '.pth' is truthy, so the
        # extension check never rejected anything.
        if ext in ('.pkl', '.pth'):
            print('Loading weights into state dict...')
            self.load_state_dict(torch.load(base_file,
                                            map_location=lambda storage, loc: storage))
            print('Finished!')
        else:
            print('Sorry, only .pth or .pkl files is supported')



def build_refinedet(phase, num_classes=81, batch_norm=False):
    """Assemble a RefineDet model for 512x512 inputs.

    Args:
        phase: 'train' or 'test'.
        num_classes: number of classes including background
            (default 81 = COCO's 80 classes + background).
        batch_norm: build the VGG backbone with BatchNorm layers.

    Returns:
        The RefineDet module, or None if phase is not recognized.
    """
    if phase != "test" and phase != "train":
        # Bug fix: original message was missing the space before "not",
        # printing e.g. "Phase: foonot recognized".
        print("ERROR: Phase: " + phase + " not recognized")
        return None

    base_ = vgg(base['512'], 3, batch_norm)
    extras_ = add_extras(extras['512'], 1024, batch_norm)
    ARM_ = arm_multibox(base_, extras_, mbox['512'])
    ODM_ = odm_multibox(ARM_, mbox['512'], num_classes)
    TCB_ = add_tcb(tcb['512'])
    return RefineDet(phase, 512, base_, extras_, ARM_, ODM_, TCB_, num_classes)

if __name__ == "__main__":
    # Quick sanity check: flatten a batch of random tensors exactly the
    # way forward() flattens per-source predictions before concatenation.
    samples = []
    for _ in range(10):
        t = torch.rand([1, 2, 3, 4])
        print(t)
        samples.append(t)
    flat = torch.cat([s.view(s.size(0), -1) for s in samples], 1)
    print(flat.shape)
    print(flat)