import torch
import torch.nn as nn
import numpy as np
from collections import OrderedDict
from .backbone import backbone_fn
from net.yolo_layer import *


class DarknetModel(nn.Module):
    """
    YOLOv3-style object detection model.

    A backbone produces feature maps at three scales; each scale is fed
    through a detection head ("embedding") and decoded by a YoloLayer.
    Finer scales receive an upsampled branch from the coarser head
    (feature-pyramid style).
    """

    def __init__(self, config):
        """
        Args:
            config (dict): must provide
                config["model_param"]["backbone_name"] / ["backbone_pretrained"],
                config["yolo"]["anchors"] (three anchor groups, coarse to fine),
                config["yolo"]["num_class"], and config["img_size"].
        """
        super(DarknetModel, self).__init__()

        self.config = config
        self.model_param = config["model_param"]
        # Darknet '.weights' header bookkeeping (see load/save_darknet_weights).
        self.seen = 0
        self.header_info = np.array([0, 0, 0, self.seen, 0], dtype=np.int32)
        # backbone: returns feature maps at three scales
        _backbone_fn = backbone_fn[self.model_param["backbone_name"]]
        self.backbone = _backbone_fn(self.model_param["backbone_pretrained"])
        _out_filters = self.backbone.layers_out_filters

        anchors = config["yolo"]["anchors"]
        num_class = config["yolo"]["num_class"]
        # Per anchor the head predicts (x, y, w, h, objectness) + class scores.
        # embedding0: coarsest grid, detects the largest objects
        final_out_filter0 = len(anchors[0]) * (5 + num_class)
        self.embedding0 = self._make_embedding([512, 1024], _out_filters[-1], final_out_filter0)
        # embedding1: middle grid; consumes embedding0's branch, upsampled
        final_out_filter1 = len(anchors[1]) * (5 + num_class)
        self.embedding1_cbl = self._make_cbl(512, 256, 1)
        self.embedding1_upsample = nn.Upsample(scale_factor=2, mode='nearest')
        self.embedding1 = self._make_embedding([256, 512], _out_filters[-2] + 256, final_out_filter1)
        # embedding2: finest grid, detects the smallest objects
        final_out_filter2 = len(anchors[2]) * (5 + num_class)
        self.embedding2_cbl = self._make_cbl(256, 128, 1)
        self.embedding2_upsample = nn.Upsample(scale_factor=2, mode='nearest')
        self.embedding2 = self._make_embedding([128, 256], _out_filters[-3] + 128, final_out_filter2)
        # yolo layers: decode raw head outputs into detections / losses
        self.yolo0 = YoloLayer(anchors[0], num_class, config["img_size"])
        self.yolo1 = YoloLayer(anchors[1], num_class, config["img_size"])
        self.yolo2 = YoloLayer(anchors[2], num_class, config["img_size"])
        self.yolo_layers = [self.yolo0, self.yolo1, self.yolo2]

    def forward(self, x):
        def _branch(_embedding, _in):
            # Run one detection head.  The output of the 5th module is the
            # branch point that feeds the next (finer) scale via upsampling.
            out_branch = None
            for i, e in enumerate(_embedding):
                _in = e(_in)
                if i == 4:
                    out_branch = _in
            return _in, out_branch

        img_size = [x.shape[-1], x.shape[-2]]  # (width, height) of the input batch
        outputs = []
        # backbone: three feature maps; x0 is the coarsest (deepest) one
        x2, x1, x0 = self.backbone(x)
        # yolo branch 0: large objects, coarse grid
        out0, out0_branch = _branch(self.embedding0, x0)
        out0 = self.yolo0(out0, img_size)
        outputs.append(out0)
        # yolo branch 1: medium objects; fuse upsampled branch with x1 ("route" layer)
        x1_in = self.embedding1_cbl(out0_branch)
        x1_in = self.embedding1_upsample(x1_in)
        x1_in = torch.cat([x1_in, x1], 1)
        out1, out1_branch = _branch(self.embedding1, x1_in)
        out1 = self.yolo1(out1, img_size)
        outputs.append(out1)
        # yolo branch 2: small objects; fuse upsampled branch with x2
        x2_in = self.embedding2_cbl(out1_branch)
        x2_in = self.embedding2_upsample(x2_in)
        x2_in = torch.cat([x2_in, x2], 1)
        out2, _ = _branch(self.embedding2, x2_in)  # last scale: branch output unused
        out2 = self.yolo2(out2, img_size)
        outputs.append(out2)
        return outputs

    def _make_cbl(self, _in, _out, kernel_size):
        """
        CBL = conv + batch norm + leaky ReLU.

        Padding is chosen so a stride-1 conv preserves spatial size.
        Conv bias is disabled because the batch norm provides the shift.
        """
        pad = (kernel_size - 1) // 2 if kernel_size else 0
        return nn.Sequential(OrderedDict([
            ("conv", nn.Conv2d(_in, _out, kernel_size=kernel_size, stride=1, padding=pad, bias=False)),
            ("bn", nn.BatchNorm2d(_out)),
            ("relu", nn.LeakyReLU(0.1))
        ]))

    def _make_embedding(self, filters_list, in_filters, out_filter):
        """
        One detection head: six alternating 1x1 / 3x3 CBL blocks followed by
        a plain 1x1 conv ("conv_out", with bias) producing the raw YOLO map.

        NOTE: the child names ("0".."5", "conv_out") are part of the
        state_dict key layout that load_darknet_weights relies on.
        """
        m = nn.ModuleList([
            self._make_cbl(in_filters, filters_list[0], 1),
            self._make_cbl(filters_list[0], filters_list[1], 3),
            self._make_cbl(filters_list[1], filters_list[0], 1),
            self._make_cbl(filters_list[0], filters_list[1], 3),
            self._make_cbl(filters_list[1], filters_list[0], 1),
            self._make_cbl(filters_list[0], filters_list[1], 3)
        ])
        m.add_module("conv_out", nn.Conv2d(filters_list[1], out_filter, kernel_size=1,
                                           stride=1, padding=0, bias=True))
        return m

    def load_darknet_weights(self, weights_path):
        """
        Parse and load the darknet-format weights stored in 'weights_path'.

        Darknet serializes each convolutional block as: bn bias, bn weight,
        bn running_mean, bn running_var, then the conv weight (or, without
        batch norm, conv bias then conv weight).  PyTorch's state_dict
        yields keys in a different order, so the conv weight and bn weight
        are buffered in `last_conv` / `last_bn_weight` and copied once
        their darknet file position is reached.
        """
        with open(weights_path, "rb") as fp:
            self.header_info = np.fromfile(fp, dtype=np.int32, count=5)  # first five ints are the header
            self.seen = self.header_info[3]  # number of images seen during training
            weights = np.fromfile(fp, dtype=np.float32)  # the rest are weights
        print("Total length of weights = ", weights.shape[0])

        ptr = 0
        all_dict = self.state_dict()
        last_bn_weight = None
        last_conv = None
        # copy_() writes through to the live parameters/buffers because the
        # state_dict tensors share storage with them.
        for k, v in all_dict.items():
            if "bn" in k:
                if "weight" in k:
                    # bn.weight precedes bn.bias in state_dict; hold it until
                    # its darknet position (right after the bias) comes up.
                    last_bn_weight = v
                elif "bias" in k:
                    # darknet order: bn bias first ...
                    num_b = v.numel()
                    vv = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(v)
                    v.copy_(vv)
                    ptr += num_b
                    # ... then bn weight
                    v = last_bn_weight
                    num_b = v.numel()
                    vv = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(v)
                    v.copy_(vv)
                    ptr += num_b
                    last_bn_weight = None
                elif "running_mean" in k:
                    num_b = v.numel()
                    vv = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(v)
                    v.copy_(vv)
                    ptr += num_b
                elif 'running_var' in k:
                    num_b = v.numel()
                    vv = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(v)
                    v.copy_(vv)
                    ptr += num_b
                    # the conv weight for this block follows its bn stats
                    v = last_conv
                    num_b = v.numel()
                    vv = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(v)
                    v.copy_(vv)
                    ptr += num_b
                    last_conv = None
                # "num_batches_tracked" falls through: not stored in darknet files
            elif 'conv' in k:
                if 'weight' in k:
                    # defer: a bn block (if present) is serialized before it
                    last_conv = v
                else:
                    # conv with bias (no bn): darknet order is bias then weight
                    num_b = v.numel()
                    vv = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(v)
                    v.copy_(vv)
                    ptr += num_b
                    v = last_conv
                    num_b = v.numel()
                    vv = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(v)
                    v.copy_(vv)
                    ptr += num_b
                    last_conv = None
        print("Real length of loaded weights = ", ptr)

    def save_darknet_weights(self, weights_path, cutoff=-1):
        """
        Save the model to 'weights_path' in darknet format.

        Args:
            weights_path: path of the new weights file.
            cutoff: save layers between 0 and cutoff (-1 -> all); currently
                unused because weight serialization is not implemented.

        TODO: only the 5-int header is written so far.  Weights must be
        serialized per conv block in darknet order: bn bias, bn weight,
        bn running_mean, bn running_var (or conv bias when there is no
        bn), then the conv weight.
        """
        with open(weights_path, "wb") as fp:
            self.header_info[3] = self.seen  # keep the image counter current
            self.header_info.tofile(fp)