# -*- coding: utf-8 -*-

"""
code from: https://github.com/tonylins/pytorch-mobilenet-v2.git
"""

import math
import os
import subprocess

import numpy as np
import torch
import torch.nn as nn

import float_tensor_pb2


def conv_bn(inp, oup, stride, padding=1):
    """Build a 3x3 Conv -> BatchNorm -> ReLU block.

    Args:
        inp: number of input channels.
        oup: number of output channels.
        stride: stride of the 3x3 convolution.
        padding: padding of the 3x3 convolution (default 1).
    """
    layers = [
        nn.Conv2d(inp, oup, 3, stride, padding, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*layers)


def conv_1x1_bn(inp, oup):
    """Build a 1x1 Conv -> BatchNorm -> ReLU block (stride 1, no padding)."""
    layers = [
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*layers)


class InvertedResidual(nn.Module):
    """MobileNetV2 inverted-residual block.

    Pipeline: 1x1 pointwise expansion -> 3x3 depthwise -> 1x1 linear
    projection.  A residual (skip) connection is added only when the block
    keeps resolution (stride == 1) and channel count (inp == oup).
    """

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]

        hidden = inp * expand_ratio
        self.use_res_connect = stride == 1 and inp == oup

        self.conv = nn.Sequential(
            # 1x1 pointwise expansion
            nn.Conv2d(inp, hidden, 1, 1, 0, bias=False),
            nn.BatchNorm2d(hidden),
            nn.ReLU(inplace=True),
            # 3x3 depthwise (groups == channels)
            nn.Conv2d(hidden, hidden, 3, stride, 1, groups=hidden, bias=False),
            nn.BatchNorm2d(hidden),
            nn.ReLU(inplace=True),
            # 1x1 pointwise projection, no activation (linear bottleneck)
            nn.Conv2d(hidden, oup, 1, 1, 0, bias=False),
            nn.BatchNorm2d(oup),
        )

    def forward(self, x):
        out = self.conv(x)
        if self.use_res_connect:
            out = out + x
        return out


class CMobileNetV2(nn.Module):
    """Single-class SSD-style detector on a MobileNetV2 backbone.

    Confidence and box-location heads are attached to two backbone feature
    maps (indices 13 and 17 of ``base_features``) and three extra-layer
    feature maps (indices 1, 3, 5 of ``extra_features``) — five prediction
    scales in total, with 1/3/3/3/3 predictions per spatial location.
    """

    def __init__(self, width_multi=1.):
        # width_multi scales the backbone channel counts.
        # NOTE(review): the extra layers and prediction heads below hard-code
        # channel counts (96, 320, 512, 256...) that only match
        # width_multi == 1.0 — confirm before using other multipliers.
        super(CMobileNetV2, self).__init__()
        # Number of classes predicted by each confidence head.
        self.classes_number = 1

        # setting of inverted residual blocks
        self.inverted_residual_setting = [
            # t = expansion ratio, c = output channels,
            # n = number of repeats, s = stride of the first repeat
            [1, 16, 1, 1],
            [6, 24, 2, 2],
            [6, 32, 3, 2],
            [6, 64, 4, 2],
            [6, 96, 3, 1],
            [6, 160, 3, 2],
            [6, 320, 1, 1],
        ]

        input_channel = int(32 * width_multi)
        # Stem: 3x3 stride-2 conv, then the inverted-residual stages
        # (18 modules total for the setting above).
        base_features = [conv_bn(3, input_channel, 2)]
        # building inverted residual blocks
        for t, c, n, s in self.inverted_residual_setting:
            output_channel = int(c * width_multi)
            for i in range(n):
                if i == 0:
                    # Only the first repeat of a stage carries the stage stride.
                    base_features.append(InvertedResidual(input_channel, output_channel, s, t))
                else:
                    base_features.append(InvertedResidual(input_channel, output_channel, 1, t))
                input_channel = output_channel
        self.base_features = nn.Sequential(*base_features)

        # Extra SSD layers: alternating 1x1 bottlenecks and 3x3 (mostly
        # stride-2) convs that keep shrinking the feature map.
        extra_features = list()
        extra_features.append(conv_1x1_bn(320, 256))
        extra_features.append(conv_bn(256, 512, 2))
        extra_features.append(conv_1x1_bn(512, 128))
        extra_features.append(conv_bn(128, 256, 2))
        extra_features.append(conv_1x1_bn(256, 128))
        extra_features.append(conv_bn(128, 256, 1, padding=0))
        self.extra_features = nn.Sequential(*extra_features)

        # Confidence heads, one per prediction scale; output channels are
        # (predictions per location) * classes_number.
        self.conf_layers = []
        self.conf_layers.append(nn.Conv2d(96,   1 * self.classes_number, 3, 1, 1))
        self.conf_layers.append(nn.Conv2d(320,  3 * self.classes_number, 3, 1, 1))
        self.conf_layers.append(nn.Conv2d(512,  3 * self.classes_number, 3, 1, 1))
        self.conf_layers.append(nn.Conv2d(256,  3 * self.classes_number, 3, 1, 1))
        self.conf_layers.append(nn.Conv2d(256,  3 * self.classes_number, 1, 1, 0))
        self.conf_model = nn.ModuleList(self.conf_layers)

        # Box-location heads, one per prediction scale; output channels are
        # (predictions per location) * 4 box offsets.
        self.loc_layers = []
        self.loc_layers.append(nn.Conv2d(96,   1 * 4, 3, 1, 1))
        self.loc_layers.append(nn.Conv2d(320,  3 * 4, 3, 1, 1))
        self.loc_layers.append(nn.Conv2d(512,  3 * 4, 3, 1, 1))
        self.loc_layers.append(nn.Conv2d(256,  3 * 4, 3, 1, 1))
        self.loc_layers.append(nn.Conv2d(256,  3 * 4, 1, 1, 0))
        self.loc_model = nn.ModuleList(self.loc_layers)

        self._initialize_weights()

    def _initialize_weights(self):
        """Initialize all submodules in place.

        Conv2d: He-style normal init scaled by fan-out, zero bias;
        BatchNorm2d: weight 1, bias 0; Linear: N(0, 0.01), zero bias.
        """
        modules = list(self.modules())
        for m in modules:
            if isinstance(m, nn.Conv2d):
                # fan-out = kernel area * output channels
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()

    def forward(self, x):
        """Run detection on a batch of images.

        Returns:
            (conf, loc) where conf is (batch, total_predictions,
            classes_number) raw class scores and loc is
            (batch, total_predictions, 4) raw box offsets.
        """
        # Positions within base_features / extra_features whose outputs feed
        # the prediction heads; these must stay in sync with the hard-coded
        # head input channels in __init__.
        base_feature_indices = [13, 17]
        extra_feature_indices = [1, 3, 5]
        conf, loc = list(), list()
        for i, feature in enumerate(self.base_features):
            x = feature(x)
            if i in base_feature_indices:
                index = base_feature_indices.index(i)
                # NCHW -> NHWC so flattening groups predictions per location.
                conf_predict = self.conf_model[index](x).permute(0, 2, 3, 1).contiguous()
                loc_predict = self.loc_model[index](x).permute(0, 2, 3, 1).contiguous()
                conf.append(conf_predict.view(conf_predict.size(0), -1, self.classes_number))
                loc.append(loc_predict.view(loc_predict.size(0), -1, 4))
        for i, feature in enumerate(self.extra_features):
            x = feature(x)
            if i in extra_feature_indices:
                # Heads 0-1 were consumed by the backbone scales above.
                index = extra_feature_indices.index(i) + 2
                conf_predict = self.conf_model[index](x).permute(0, 2, 3, 1).contiguous()
                loc_predict = self.loc_model[index](x).permute(0, 2, 3, 1).contiguous()
                conf.append(conf_predict.view(conf_predict.size(0), -1, self.classes_number))
                loc.append(loc_predict.view(loc_predict.size(0), -1, 4))
        # Concatenate across scales into one prediction axis.
        conf = torch.cat(conf, dim=1)
        loc = torch.cat(loc, dim=1)
        return conf, loc


def save_float_tensor(float_tensor_np, save_file_path):
    """Serialize a 3-D (channel, height, width) float array to a protobuf file.

    Args:
        float_tensor_np: numpy array of shape (C, H, W).
        save_file_path: destination path for the serialized FloatTensor.

    Raises:
        TypeError: if float_tensor_np is not a numpy ndarray.
        ValueError: if the array is not 3-dimensional.
    """
    # Validate with explicit exceptions rather than assert: asserts are
    # silently stripped when Python runs with -O.
    if not isinstance(float_tensor_np, np.ndarray):
        raise TypeError('float_tensor_np must be a numpy ndarray, got %s'
                        % type(float_tensor_np).__name__)
    if float_tensor_np.ndim != 3:
        raise ValueError('expected a 3-D (C, H, W) array, got %d dimension(s)'
                         % float_tensor_np.ndim)

    float_tensor_pb = float_tensor_pb2.FloatTensor()
    float_tensor_pb.shape.channel = float_tensor_np.shape[0]
    float_tensor_pb.shape.height = float_tensor_np.shape[1]
    float_tensor_pb.shape.width = float_tensor_np.shape[2]
    # Flatten to the row-major element stream the FloatTensor message expects.
    float_tensor_pb.data.extend(float_tensor_np.flatten())

    with open(save_file_path, 'wb') as save_file:
        save_file.write(float_tensor_pb.SerializeToString())


def main():
    """Generate ncnn test-case data for the MobileNetV2-SSD model.

    Runs one forward pass on random input, saves the input and both outputs
    as FloatTensor protobufs, exports the model to ONNX, converts it with
    ../onnx2ncnn, and removes the intermediate ONNX file.
    """
    model = CMobileNetV2()
    model.eval()

    # torch.autograd.Variable is deprecated since PyTorch 0.4: plain tensors
    # carry autograd state themselves.  Inference only, so skip grad tracking.
    input_x = torch.randn(1, 3, 288, 320) * 10 + 1
    with torch.no_grad():
        conf, loc = model(input_x)
    conf = torch.unsqueeze(conf, dim=0)
    loc = torch.unsqueeze(loc, dim=0)
    print(conf.size(), loc.size())

    print('conf 10 front values: ')
    print(conf.data.numpy()[0][0, 0:10, 0])
    print('loc 10 front values: ')
    print(loc.data.numpy()[0][0, 0:3, 0:4])

    input_file_path = '../../test_data/mobilenet_v2_ssd_test_case_input.pb'
    conf_file_path = '../../test_data/mobilenet_v2_ssd_test_case_conf.pb'
    loc_file_path = '../../test_data/mobilenet_v2_ssd_test_case_loc.pb'
    onnx_file_path = '../../test_data/mobilenet_v2_ssd_test_case.onnx'
    save_float_tensor(input_x.data.numpy()[0], input_file_path)
    save_float_tensor(conf.data.numpy()[0], conf_file_path)
    save_float_tensor(loc.data.numpy()[0], loc_file_path)
    torch.onnx.export(model, input_x, onnx_file_path, verbose=False)

    subprocess.call(['../onnx2ncnn',
                     '../../test_data/mobilenet_v2_ssd_test_case.onnx',
                     '../../test_data/mobilenet_v2_ssd_test_case.param',
                     '../../test_data/mobilenet_v2_ssd_test_case.bin'])
    # os.remove is portable and avoids spawning a process just to delete a file.
    os.remove(onnx_file_path)


if __name__ == "__main__":
    main()
