import torch
from torch import nn
import numpy as np
import os


# Basic building blocks: Convolutional, Residual, DownSampling, ConvolutionalSet, Upsampling, Predict
class ConvolutionalLayer(nn.Module):
    """Conv2d (bias-free) -> BatchNorm2d -> LeakyReLU(0.1).

    The convolution bias is omitted because the following BatchNorm's shift
    parameter makes it redundant; LeakyReLU keeps a small gradient for
    negative activations instead of zeroing them like plain ReLU.
    """

    def __init__(self, in_ch, out_ch, k_size, stride, pad=0):
        super(ConvolutionalLayer, self).__init__()
        stages = [
            nn.Conv2d(in_ch, out_ch, k_size, stride, pad, bias=False),
            nn.BatchNorm2d(out_ch),
            nn.LeakyReLU(0.1),
        ]
        self.conv = nn.Sequential(*stages)

    def forward(self, x):
        out = self.conv(x)
        return out


# Downsampling: output width/height are half of the input's
class DownSamplingLayer(nn.Module):
    """Stride-2 convolution that halves the spatial resolution.

    With the 3x3 kernels used throughout this file, stride 2 and padding 1
    give an output of exactly half the input height/width; darknet uses
    these instead of pooling layers.
    """

    def __init__(self, in_ch, out_ch, k_size):
        super(DownSamplingLayer, self).__init__()
        # Stride and padding are fixed (2 and 1); only channels/kernel vary.
        self.conv = ConvolutionalLayer(in_ch, out_ch, k_size, 2, 1)

    def forward(self, x):
        out = self.conv(x)
        return out


# Residual block: each block contains two convolutional layers plus a skip connection
class ResidualBlockLayer(nn.Module):
    """Darknet residual block: 1x1 squeeze to in_ch//2, 3x3 expand back, skip add.

    Input and output both have in_ch channels, so the identity shortcut
    can be added directly without a projection.
    """

    def __init__(self, in_ch):
        super(ResidualBlockLayer, self).__init__()
        mid_ch = in_ch // 2  # bottleneck width
        self.layer = nn.Sequential(
            ConvolutionalLayer(in_ch, mid_ch, 1, 1),
            ConvolutionalLayer(mid_ch, in_ch, 3, 1, 1),
        )

    def forward(self, x):
        residual = self.layer(x)
        return x + residual


class UpsamplingLayer(nn.Module):
    """Parameter-free 2x nearest-neighbour upsampling (each pixel becomes 2x2)."""

    def __init__(self):
        super(UpsamplingLayer, self).__init__()

    def forward(self, x):
        return nn.functional.interpolate(x, scale_factor=2, mode='nearest')


# Convolutional Set
class ConvolutionalSetLayer(nn.Module):
    """YOLOv3 "convolutional set": five alternating 1x1 / 3x3 convolutions.

    Channel pattern: in_ch -> out_ch -> 2*out_ch -> out_ch -> 2*out_ch -> out_ch,
    i.e. a repeated bottleneck that mixes features after each scale fusion.
    """

    def __init__(self, in_ch, out_ch):
        super().__init__()
        wide_ch = out_ch * 2
        blocks = [ConvolutionalLayer(in_ch, out_ch, 1, 1)]
        for _ in range(2):
            blocks.append(ConvolutionalLayer(out_ch, wide_ch, 3, 1, 1))
            blocks.append(ConvolutionalLayer(wide_ch, out_ch, 1, 1))
        self.layer = nn.Sequential(*blocks)

    def forward(self, x):
        return self.layer(x)


class YoloV3Net(nn.Module):
    """YOLOv3: darknet53 backbone plus three multi-scale detection heads.

    For a 416x416 input the heads predict on 13x13, 26x26 and 52x52 grids.
    Each head outputs 3 * (5 + num_classes) channels per grid cell:
    3 anchors, each with (tx, ty, tw, th, objectness) plus class scores.
    Outputs are raw logits; sigmoid decoding is left to the caller.
    """

    def __init__(self, num_classes=20):
        """num_classes: number of object classes. The default of 20 keeps the
        historical hard-coded 75 (= 3 * (5 + 20)) head output channels."""
        super(YoloV3Net, self).__init__()
        det_ch = 3 * (5 + num_classes)  # per-head output channels

        # Backbone stage 1: stride 8, 256 channels ("52x52" feature map).
        self.trunk_52 = nn.Sequential(
            ConvolutionalLayer(3, 32, 3, 1, 1),
            DownSamplingLayer(32, 64, 3),
            # group 1: one residual block
            ResidualBlockLayer(64),
            DownSamplingLayer(64, 128, 3),
            # group 2: two residual blocks
            *[ResidualBlockLayer(128) for _ in range(2)],
            DownSamplingLayer(128, 256, 3),
            # group 3: eight residual blocks
            *[ResidualBlockLayer(256) for _ in range(8)],
        )

        # Backbone stage 2: stride 16, 512 channels ("26x26").
        self.trunk_26 = nn.Sequential(
            DownSamplingLayer(256, 512, 3),
            *[ResidualBlockLayer(512) for _ in range(8)],
        )

        # Backbone stage 3: stride 32, 1024 channels ("13x13").
        self.trunk_13 = nn.Sequential(
            DownSamplingLayer(512, 1024, 3),
            *[ResidualBlockLayer(1024) for _ in range(4)],
        )

        # Coarsest-scale head.
        self.convset_13 = nn.Sequential(
            ConvolutionalSetLayer(1024, 512)
        )
        self.det_13 = nn.Sequential(
            ConvolutionalLayer(512, 1024, 3, 1, 1),
            nn.Conv2d(1024, det_ch, 1, 1, 0)
        )

        # Upsample and fuse with stage-2 features (512 + 256 = 768 in).
        self.up_26 = nn.Sequential(
            ConvolutionalLayer(512, 256, 1, 1, 0),
            UpsamplingLayer()
        )
        self.convset_26 = nn.Sequential(
            ConvolutionalSetLayer(768, 256)
        )
        self.det_26 = nn.Sequential(
            ConvolutionalLayer(256, 512, 3, 1, 1),
            nn.Conv2d(512, det_ch, 1, 1, 0)
        )

        # Upsample and fuse with stage-1 features (256 + 128 = 384 in).
        self.up_52 = nn.Sequential(
            ConvolutionalLayer(256, 128, 1, 1, 0),
            UpsamplingLayer()
        )
        self.convset_52 = nn.Sequential(
            ConvolutionalSetLayer(384, 128)
        )
        self.det_52 = nn.Sequential(
            ConvolutionalLayer(128, 256, 3, 1, 1),
            nn.Conv2d(256, det_ch, 1, 1, 0)
        )

    def forward(self, x):
        """Return raw predictions (out_13, out_26, out_52), coarsest first.

        NOTE(review): sigmoid activation of the box/objectness channels is
        intentionally not applied here — confirm the loss / post-processing
        code expects raw logits.
        """
        h_52 = self.trunk_52(x)
        h_26 = self.trunk_26(h_52)
        h_13 = self.trunk_13(h_26)

        convset_13 = self.convset_13(h_13)
        out_13 = self.det_13(convset_13)

        # Feature-pyramid fusion: upsample the coarse features and
        # concatenate them with the finer backbone map along channels.
        up_26 = self.up_26(convset_13)
        convset_26 = self.convset_26(torch.cat((up_26, h_26), dim=1))
        out_26 = self.det_26(convset_26)

        up_52 = self.up_52(convset_26)
        convset_52 = self.convset_52(torch.cat((up_52, h_52), dim=1))
        out_52 = self.det_52(convset_52)

        return out_13, out_26, out_52


def freeze_darknet(self):
    """Freeze the darknet53 backbone (trunk_52 / trunk_26 / trunk_13).

    Convolution kernels and batch-norm affine parameters stop receiving
    gradients, and batch-norm layers are switched to eval() so their
    running statistics are no longer updated.  Detection heads stay
    trainable.  running_mean / running_var are buffers and never require
    gradients, so they need no explicit handling.

    self: the YoloV3Net instance (callable as a plain function).

    NOTE(review): a later model.train() will put the BN layers back into
    training mode; re-call this after switching modes if that matters.
    """
    for trunk_name in ("trunk_52", "trunk_26", "trunk_13"):
        for m in getattr(self, trunk_name).modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()  # stop updating running statistics
                m.weight.requires_grad = False
                m.bias.requires_grad = False
            elif isinstance(m, nn.Conv2d):
                # Backbone convs are built with bias=False, so the kernel
                # is the only parameter to freeze.
                m.weight.requires_grad = False


#     print(i[0])

def load_darknet_weights(self, weights):
    """Load darknet-format weights (e.g. darknet53.conv.74) into the backbone.

    File layout: header (3 x int32 version, 1 x int64 images-seen counter)
    followed by raw float32 tensors.  For each convolution followed by a
    batch-norm, the on-disk order is: bn bias, bn weight, bn running mean,
    bn running var, then the conv kernel (the conv itself has no bias).
    Only the trunk_* backbone modules are filled; detection heads keep
    their random initialisation.

    self: the YoloV3Net instance (callable as a plain function).
    weights: path to the darknet weight file.
    """
    with open(weights, 'rb') as f:
        # Header format: https://github.com/AlexeyAB/darknet/issues/2914#issuecomment-496675346
        version = np.fromfile(f, dtype=np.int32, count=3)  # major, minor, revision
        seen = np.fromfile(f, dtype=np.int64, count=1)  # images seen during training
        data = np.fromfile(f, dtype=np.float32)  # the rest are weights

    print("version,seen", version, seen)
    print("weights size", data.shape)

    ptr = 0  # read cursor into the flat float32 array

    def take(dst):
        # Copy the next dst.numel() floats into tensor dst and advance ptr.
        nonlocal ptr
        n = dst.numel()
        dst.data.copy_(torch.from_numpy(data[ptr: ptr + n]).view_as(dst))
        ptr += n

    modules = self.named_modules()
    next(modules)  # skip the root module itself (its name is "")
    for name, module in modules:
        # named_modules() yields in registration order, so the first
        # non-trunk name means we are past everything darknet53 provides.
        if not name.startswith(("trunk_52", "trunk_26", "trunk_13")):
            break
        if isinstance(module, nn.Conv2d):
            conv = module
            # In the backbone every Conv2d is immediately followed by its
            # BatchNorm2d, whose parameters precede the kernel on disk.
            bn = next(modules, (None, None))[1]
            if isinstance(bn, nn.BatchNorm2d):
                take(bn.bias)
                take(bn.weight)
                take(bn.running_mean)
                take(bn.running_var)
            take(conv.weight)

    print("all weights load ok: including Batchnormal", ptr)


if __name__ == '__main__':
    # Build the network, warm-start the backbone from pretrained darknet53
    # weights on the share, then freeze it so only the heads would train.
    net = YoloV3Net()
    load_darknet_weights(net, r"\\192.168.1.153\share\yolov3\darknet53.conv.74")
    freeze_darknet(net)

    # Smoke test: push one fake 416x416 RGB image through the network and
    # print the three detection-head output shapes.
    dummy = torch.randn(1, 3, 416, 416)
    out_13, out_26, out_52 = net(dummy)
    print(out_13.shape)
    print(out_26.shape)
    print(out_52.shape)
