import os
import torch
from torch.nn import Conv2d, Sequential, ModuleList, BatchNorm2d
from torch import nn
from ..nn.mobilenet_v2.mobilenet_v2 import MobileNetV2, InvertedResidual
from ..ssd.ssd import SSD
from torch.nn import Conv2d, Sequential, ModuleList, ReLU
from basetrainer.utils import torch_tools


def SeperableConv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, onnx_compatible=False):
    """Build a depthwise-separable convolution block.

    Replaces a full Conv2d with a depthwise Conv2d (groups=in_channels),
    followed by BatchNorm + ReLU, then a 1x1 pointwise Conv2d that mixes
    channels and produces ``out_channels``.

    :param in_channels: number of input channels (also the depthwise group count).
    :param out_channels: number of output channels of the pointwise conv.
    :param kernel_size: kernel size of the depthwise conv.
    :param stride: stride of the depthwise conv.
    :param padding: padding of the depthwise conv.
    :param onnx_compatible: use ``nn.ReLU`` instead of ``nn.ReLU6`` (ReLU6
        export support varies across ONNX toolchains).
    :return: an ``nn.Sequential`` of [depthwise conv, BN, activation, pointwise conv].
    """
    # Bind the activation class to a local name instead of rebinding the
    # module-level `ReLU` imported at the top of the file.
    activation = nn.ReLU if onnx_compatible else nn.ReLU6
    return Sequential(
        Conv2d(in_channels=in_channels, out_channels=in_channels, kernel_size=kernel_size,
               groups=in_channels, stride=stride, padding=padding),
        BatchNorm2d(in_channels),
        activation(),
        Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1),
    )


def create_mobilenetv2_ssd(prior_boxes, num_classes, is_test=False, width_mult=1.0, pretrained=False, device="cuda:0",
                           **kwargs):
    """Assemble an SSD detector on top of a MobileNetV2 backbone.

    Features are tapped from three intermediate backbone stages plus one
    extra stride-2 layer appended after the backbone, giving four detection
    scales, each with its own bbox-regression and classification header.

    :param prior_boxes: prior-box configuration; must expose ``aspect_ratios``
        and ``min_sizes`` (assumed to hold one entry per feature map — verify
        against the caller's config).
    :param num_classes: number of classes predicted per prior box.
    :param is_test: forwarded to ``SSD`` (inference vs. training behavior).
    :param width_mult: MobileNetV2 width multiplier; scales the tapped
        channel counts accordingly.
    :param pretrained: path to a checkpoint to load; any falsy value skips
        loading. A non-string truthy value resolves to "" and therefore raises.
    :param device: ``map_location`` used when loading the checkpoint.
    :param kwargs: ignored; accepted for signature compatibility with other
        model factories.
    :return: the assembled ``SSD`` model.
    :raises Exception: if ``pretrained`` is truthy but the file does not exist.
    """
    base_net = MobileNetV2(width_mult=width_mult, use_batch_norm=True, onnx_compatible=False)
    backbone = base_net.features
    # Backbone stage indices whose outputs feed the detection headers.
    # feature_index = [7, 11, 17]
    feature_index = [6, 10, 16]
    # Channel counts at each tapped stage (scaled by width_mult), plus the
    # backbone's final channel count for the extra layer / last header.
    channels = [int(32 * width_mult),
                int(64 * width_mult),
                int(160 * width_mult),
                base_net.last_channel]
    # One extra stride-2 block after the backbone produces the coarsest scale.
    extra_layers = ModuleList([Sequential(Conv2d(in_channels=channels[3],
                                                 out_channels=channels[2],
                                                 kernel_size=1),
                                          ReLU(inplace=True),
                                          SeperableConv2d(in_channels=channels[2],
                                                          out_channels=channels[3],
                                                          kernel_size=3,
                                                          stride=2,
                                                          padding=1),
                                          ReLU(inplace=True))])

    # Number of prior boxes per spatial location for each feature map.
    boxes_expand = [len(boxes) * (len(prior_boxes.aspect_ratios)) for boxes in prior_boxes.min_sizes]
    # Bounding-box regression headers: 4 coordinates per prior box.
    bbox_headers = ModuleList([SeperableConv2d(in_channels=channels[0],
                                               out_channels=boxes_expand[0] * 4,
                                               kernel_size=3,
                                               padding=1),
                               SeperableConv2d(in_channels=channels[1],
                                               out_channels=boxes_expand[1] * 4,
                                               kernel_size=3,
                                               padding=1),
                               SeperableConv2d(in_channels=channels[2],
                                               out_channels=boxes_expand[2] * 4,
                                               kernel_size=3,
                                               padding=1),
                               Conv2d(in_channels=channels[3],
                                      out_channels=boxes_expand[3] * 4,
                                      kernel_size=3,
                                      padding=1)])

    # Classification headers: num_classes scores per prior box.
    class_headers = ModuleList([SeperableConv2d(in_channels=channels[0],
                                                out_channels=boxes_expand[0] * num_classes,
                                                kernel_size=3,
                                                padding=1),
                                SeperableConv2d(in_channels=channels[1],
                                                out_channels=boxes_expand[1] * num_classes,
                                                kernel_size=3,
                                                padding=1),
                                SeperableConv2d(in_channels=channels[2],
                                                out_channels=boxes_expand[2] * num_classes,
                                                kernel_size=3,
                                                padding=1),
                                Conv2d(in_channels=channels[3],
                                       out_channels=boxes_expand[3] * num_classes,
                                       kernel_size=3,
                                       padding=1)])
    model = SSD(num_classes,
                backbone,
                extra_layers,
                feature_index,
                class_headers,
                bbox_headers,
                is_test=is_test,
                prior_boxes=prior_boxes,
                device=device)

    if pretrained:
        # `pretrained` may be a path string; any other truthy value falls
        # through to "" and fails the existence check below.
        file = pretrained if isinstance(pretrained, str) else ""
        if os.path.exists(file):
            print("use pretrained file {}".format(file))
            load_dict = torch.load(file, map_location=device)
            model = torch_tools.load_pretrained_model(model, load_dict)
        else:
            # BUGFIX: original format string had no placeholder, so the
            # offending path was dropped from the error message.
            raise Exception("no pretrained file {}".format(file))
    return model
