import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from layers import *
from data import *
import os
from submodule import *

# Pretrained backbone checkpoint URLs (VGG16 with reduced fc layers).
# NOTE(review): not referenced anywhere in this file — presumably consumed
# by a caller or a training script; verify before removing.
model_urls = {
    'vgg': 'https://s3.amazonaws.com/amdegroot-models/vgg16_reducedfc.pth',
}

class Myssd(nn.Module):
    """SSD-style detector with an SE-gated conv backbone and ConvLSTM sources.

    Architecture (as built below):
      - a stride-2 7x7 conv head taking 5 input channels,
      - two conv stages, each ending in an SELayer whose output is added
        residually to the stage output,
      - five stacked ConvLSTM layers; every LSTM output is used as a
        multibox source map,
      - per-source loc/conf conv heads.

    Args:
        phase: 'test' (adds softmax + Detect post-processing) or 'train'.
        size: nominal input size (stored for reference only).
        num_classes: number of object classes (including background).
    """

    def __init__(self, phase, size, num_classes):
        super(Myssd, self).__init__()
        self.phase = phase
        self.num_classes = num_classes
        self.cfg = myvoc512
        self.priorbox = PriorBox(self.cfg)
        # Priors are constant; build them once without tracking gradients.
        # (Replaces the deprecated Variable(..., volatile=True) idiom,
        # removed in PyTorch >= 0.4.)
        with torch.no_grad():
            self.priors = self.priorbox.forward()
        self.size = size

        # Backbone head: 5-channel input, stride-2 7x7 conv.  # 256
        self.head = BNConvReLU(5, out_channels=32, kernel_size=7, stride=2, padding=3)

        # Two SE-gated stages. Indices 3 and 7 are SELayers; forward() adds
        # their outputs residually to the preceding feature map.
        self.SE = nn.ModuleList()
        self.SE.append(BNConvReLU(32, 64, kernel_size=3, stride=1, padding=1))
        self.SE.append(BNConvReLU(64, 64, kernel_size=3, stride=1, padding=1))
        self.SE.append(BNConv(64, 128, kernel_size=3, stride=2, padding=1))
        self.SE.append(SELayer(128))
        self.SE.append(BNConvReLU(128, 256, kernel_size=3, stride=1, padding=1))
        self.SE.append(BNConvReLU(256, 256, kernel_size=3, stride=1, padding=1))
        self.SE.append(BNConv(256, 256, kernel_size=3, stride=2, padding=1))
        self.SE.append(SELayer(256))

        # Recurrent feature stack; each layer's output is a multibox source.
        self.LSTM = nn.ModuleList()
        self.LSTM.append(ConvLSTM(256, 256, 3))  # 32
        self.LSTM.append(ConvLSTM(256, 256, 3))
        self.LSTM.append(ConvLSTM(256, 256, 3))  # 8
        self.LSTM.append(ConvLSTM(256, 256, 3))
        self.LSTM.append(ConvLSTM(256, 256, 3))  # 2

        # Multibox heads: one loc/conf pair per source map.
        # BUGFIX: the original shadowed the constructor's num_classes with a
        # hard-coded 8, so the conf heads disagreed with the reshape in
        # forward() for any other class count.
        loc_layers = []
        conf_layers = []
        cfg_ = [4, 6, 6, 4, 4]  # anchors per location, per source map

        for i in range(5):
            loc_layers += [nn.Conv2d(256, cfg_[i] * 4, kernel_size=3, padding=1)]
            conf_layers += [nn.Conv2d(256, cfg_[i] * num_classes, kernel_size=3, padding=1)]

        self.loc = nn.ModuleList(loc_layers)
        self.conf = nn.ModuleList(conf_layers)

        if phase == 'test':
            self.softmax = nn.Softmax(dim=-1)
            self.detect = Detect(num_classes, 512, 0, 200, 0.01, 0.45)

    def forward(self, x, prev_states):
        """Run one detection step.

        Args:
            x: input batch; assumes 5 channels (see self.head) — TODO confirm
               spatial size against the backbone comments.
            prev_states: per-ConvLSTM previous states, one entry per layer.

        Returns:
            (output, states) where states are the new ConvLSTM states and
            output is either the Detect() result (phase == 'test') or the
            (loc, conf, priors) training tuple.
        """
        # Head + SE stage 1 (layers 0-2), residual SE gate at index 3.
        x = self.head(x)

        for i in range(3):
            x = self.SE[i](x)
        y1 = self.SE[3](x)
        x = x + y1

        # SE stage 2 (layers 4-6), residual SE gate at index 7.
        for i in range(4, 7):
            x = self.SE[i](x)
        y2 = self.SE[7](x)
        x = x + y2

        # ConvLSTM stack: every output map is collected as a multibox source.
        states = []
        sources = []
        loc = []
        conf = []
        for i, ls in enumerate(self.LSTM):
            state = ls(x, prev_states[i])
            x = state[0]
            states.append(state)
            sources.append(x)

        # Apply the multibox heads; permute to NHWC before flattening so the
        # per-anchor values stay contiguous.
        for (x, l, c) in zip(sources, self.loc, self.conf):
            loc.append(l(x).permute(0, 2, 3, 1).contiguous())
            conf.append(c(x).permute(0, 2, 3, 1).contiguous())

        loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
        conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)

        if self.phase == "test":
            output = self.detect(
                loc.view(loc.size(0), -1, 4),                # loc preds
                self.softmax(conf.view(conf.size(0), -1,
                                       self.num_classes)),   # conf preds
                self.priors.type(type(x.data))               # default boxes
            )
        else:
            output = (
                loc.view(loc.size(0), -1, 4),
                conf.view(conf.size(0), -1, self.num_classes),
                self.priors
            )
        return output, states

    def load_weights(self, base_file):
        """Load a state dict from a .pth/.pkl checkpoint onto CPU storage.

        Args:
            base_file: path to the checkpoint file.
        """
        other, ext = os.path.splitext(base_file)
        # BUGFIX: the original condition was `ext == '.pkl' or '.pth'`,
        # which is always truthy, so the extension check never rejected
        # anything.
        if ext in ('.pkl', '.pth'):
            print('Loading weights into state dict...')
            self.load_state_dict(torch.load(base_file,
                                            map_location=lambda storage, loc: storage))
            print('Finished!')
        else:
            print('Sorry only .pth and .pkl files supported.')


def build_ssd(phase, size=300, num_classes=8):
    """Factory for Myssd.

    Args:
        phase: 'train' or 'test'.
        size: input size; 300 and 512 are both accepted by the check below.
        num_classes: number of classes (including background).

    Returns:
        A Myssd instance, or None when phase/size are invalid (an error
        message is printed in that case).
    """
    if phase != "test" and phase != "train":
        print("ERROR: Phase: " + phase + " not recognized")
        return

    if size not in [300, 512]:
        # BUGFIX: the original message claimed only size=300 is supported,
        # contradicting the check above, which also accepts 512.
        print("ERROR: You specified size " + repr(size) + ". However, " +
              "only SSD300 (size=300) and SSD512 (size=512) are supported!")
        return

    return Myssd(phase, size, num_classes)


if __name__ == "__main__":
    # Smoke check: build a test-phase model and dump its module tree.
    model = Myssd('test', 300, 3)
    print(model)





