from __future__ import absolute_import, division, print_function

import numpy as np

import torch
import torch.nn as nn
from .resnet import ResNet, BasicBlock, resnet18, resnet34, resnet50, resnet101, Bottleneck
from torch.nn import BatchNorm2d as bn

class ResNetMultiImageInput(ResNet):
    """ResNet variant whose first convolution accepts several stacked images.

    Identical to the stock ResNet except that ``conv1`` is widened to
    ``num_input_images * 3`` input channels; all weights are re-initialised
    after construction.
    """

    def __init__(self, block, layers, num_classes=1000, num_input_images=2):
        super(ResNetMultiImageInput, self).__init__(block, layers)
        self.inplanes = 64
        # Stem: a single 7x7 conv over all stacked frames (3 channels each).
        self.conv1 = nn.Conv2d(num_input_images * 3, 64,
                               kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = bn(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages; stages 2-4 halve the spatial resolution.
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)

        # Re-initialise everything: He-normal for convs, identity affine for BN.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)


def resnet_multiimage_input(num_layers, num_input_images=2, pretrained_path=None):
    """Build a ResNetMultiImageInput model, optionally loading pretrained weights.

    Args:
        num_layers (int): resnet depth; must be 18, 34, 50 or 101.
        num_input_images (int): number of frames stacked along the channel axis.
        pretrained_path (str or None): path to a single-image checkpoint whose
            ``conv1.weight`` is tiled across the extra frames and rescaled so
            the stem's expected activation magnitude is preserved.

    Returns:
        ResNetMultiImageInput: the constructed (and optionally pretrained) model.
    """
    assert num_layers in [18, 34, 50, 101], "Can only run with 18, 34, 50, 101 layers resnet"
    blocks = {18 : [2, 2, 2,  2],
              34 : [3, 4, 6,  3],
              50 : [3, 4, 6,  3],
              101: [3, 4, 23, 3],
              }[num_layers]

    # Depths 18/34 use BasicBlock, 50/101 use Bottleneck.  The original
    # `if < 40 / elif > 40` pair left `model` unbound for any other value;
    # a plain if/else removes that hazard.
    if num_layers <= 34:
        model = ResNetMultiImageInput(BasicBlock, blocks, num_input_images=num_input_images)
    else:
        model = ResNetMultiImageInput(Bottleneck, blocks, num_input_images=num_input_images)

    if pretrained_path is not None:
        # map_location='cpu' lets GPU-saved checkpoints load on CPU-only
        # machines; the freshly built model lives on the CPU here anyway.
        loaded = torch.load(pretrained_path, map_location='cpu')
        loaded['conv1.weight'] = torch.cat(
            [loaded['conv1.weight']] * num_input_images, 1) / num_input_images
        model.load_state_dict(loaded)
    return model


class PoseEncoder(nn.Module):
    """ResNet feature extractor used as the pose-network encoder.

    Wraps a (possibly multi-image-input) resnet and exposes the five
    intermediate feature maps produced by the stem and the four residual
    stages.

    Args:
        num_layers (int): resnet depth; one of 18, 34, 50, 101.
        pretrained_path (str or None): optional checkpoint to load into the
            encoder.
        num_input_images (int): frames stacked on the channel axis; when > 1
            a ResNetMultiImageInput backbone is built instead of a stock one.

    Raises:
        ValueError: if ``num_layers`` is not a supported depth.
    """

    def __init__(self, num_layers, pretrained_path=None, num_input_images=2):
        super(PoseEncoder, self).__init__()

        # Channel counts of the five returned feature maps (BasicBlock nets);
        # scaled x4 below for Bottleneck-based depths.
        self.num_ch_enc = np.array([64, 64, 128, 256, 512])

        resnets = {18: resnet18,
                   34: resnet34,
                   50: resnet50,
                   101: resnet101,}

        if num_layers not in resnets:
            raise ValueError("{} is not a valid number of resnet layers".format(num_layers))

        if num_input_images > 1:
            self.encoder = resnet_multiimage_input(num_layers, num_input_images, pretrained_path)
        else:
            self.encoder = resnets[num_layers]()
            if pretrained_path is not None:
                # map_location='cpu' lets GPU-saved checkpoints load on
                # CPU-only machines; the model is still on the CPU here.
                checkpoint = torch.load(pretrained_path, map_location='cpu')
                self.encoder.load_state_dict(checkpoint)

        # Resnet-50/101 use Bottleneck blocks, which expand each stage's
        # output channels by 4x.
        if num_layers > 34:
            self.num_ch_enc[1:] *= 4

    def forward(self, input_image):
        """Run the encoder and collect the multi-scale features.

        Args:
            input_image: image tensor with values assumed in [0, 1]
                (normalised below) — TODO confirm against caller.

        Returns:
            list of 5 feature tensors, highest resolution first; also stored
            on ``self.features`` for callers that read the attribute.
        """
        self.features = []
        # Shift/scale roughly to zero mean, unit variance.
        x = (input_image - 0.45) / 0.225

        x1 = self.encoder.relu(self.encoder.bn1(self.encoder.conv1(x)))
        x2 = self.encoder.layer1(self.encoder.maxpool(x1))
        x3 = self.encoder.layer2(x2)
        x4 = self.encoder.layer3(x3)
        x5 = self.encoder.layer4(x4)

        self.features.append(x1)
        self.features.append(x2)
        self.features.append(x3)
        self.features.append(x4)
        self.features.append(x5)

        return self.features


#
# def _make_divisible(ch, divisor=8, min_ch=None):
#     # Round channel counts to an integer multiple of round_nearest
#     """
#     This function is taken from the original tf repo.
#     It ensures that all layers have a channel number that is divisible by 8
#     It can be seen here:
#     https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
#     """
#     if min_ch is None:
#         min_ch = divisor
#     new_ch = max(min_ch, int(ch + divisor / 2) // divisor * divisor)
#     # Make sure that round down does not go down by more than 10%.
#     if new_ch < 0.9 * ch:
#         new_ch += divisor
#     return new_ch
#
#
# class ConvBNReLU(nn.Sequential):
#     def __init__(self, in_channel, out_channel, kernel_size=3, stride=1, groups=1):
#         ## groups=1 means an ordinary convolution; groups equal to the input
#         ## channel count means a depthwise (DW) convolution
#
#         padding = (kernel_size - 1) // 2
#         super(ConvBNReLU, self).__init__(
#             nn.Conv2d(in_channel, out_channel, kernel_size, stride, padding, groups=groups, bias=False),
#             nn.BatchNorm2d(out_channel),
#             nn.ReLU6(inplace=True)
#         )
#
#
# class InvertedResidual(nn.Module):
#     def __init__(self, in_channel, out_channel, stride, expand_ratio):
#         super(InvertedResidual, self).__init__()
#         hidden_channel = in_channel * expand_ratio  # 第一层卷积核的个数
#         self.use_shortcut = stride == 1 and in_channel == out_channel
#         # 布尔变量
#         layers = []
#         if expand_ratio != 1:
#             # 1x1 pointwise conv
#             layers.append(ConvBNReLU(in_channel, hidden_channel, kernel_size=1))
#         layers.extend([
#             # 3x3 depthwise conv
#             ConvBNReLU(hidden_channel, hidden_channel, stride=stride, groups=hidden_channel),
#             # 1x1 pointwise conv(linear)
#             nn.Conv2d(hidden_channel, out_channel, kernel_size=1, bias=False),
#             nn.BatchNorm2d(out_channel),
#         ])
#
#         self.conv = nn.Sequential(*layers)
#
#     def forward(self, x):
#         if self.use_shortcut:
#             return x + self.conv(x)
#         else:
#             return self.conv(x)
#
#
# class PoseEncoder(nn.Module):
#     def __init__(self, num_layers, alpha=1.0, round_nearest=8):
#         super(PoseEncoder, self).__init__()
#         block = InvertedResidual
#         # input_channel = _make_divisible(32 * alpha, round_nearest)
#         # last_channel = _make_divisible(1280 * alpha, round_nearest)
#         self.num_ch_enc = np.array([64, 64, 128, 256, 512])
#
#         resnets = {18: resnet18,
#                    34: resnet34,
#                    50: resnet50,
#                    101: resnet101, }
#
#         inverted_residual_setting = [
#             # t, c, n, s
#             [1, 64, 1, 1],
#             [6, 128, 2, 2],
#             [6, 128, 2, 1],
#             [6, 256, 3, 2],
#             [6, 256, 3, 1],
#             [6, 512, 4, 2],
#             [6, 512, 4, 1],
#             # [6, 96, 3, 1],
#             # [6, 160, 3, 2],
#             # [6, 320, 1, 1],
#         ]
#
#         # 通道数
#         self.inplanes = 64
#
#         self.conv1 = nn.Conv2d(6, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
#         self.bn1 = nn.BatchNorm2d(self.inplanes)
#         self.relu = nn.ReLU(inplace=True)
#         self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
#
#         # features = []
#         # 第一个卷积层
#         # self.layer = ConvBNReLU(3, self.inplanes, stride=2))
#
#         # 创建第一个倒残差层
#         features1 = []
#         t1, c1, n1, s1 = inverted_residual_setting[0]
#         print(c1 * alpha)
#         output_channel1 = _make_divisible(c1 * alpha, round_nearest)
#         for i in range(n1):
#             stride = s1 if i == 0 else 1
#             features1.append(block(self.inplanes, output_channel1, stride, expand_ratio=t1))
#         self.features1 = nn.Sequential(*features1)
#
#         # 创建第二个倒残差层
#         features2 = []
#         t2, c2, n2, s2 = inverted_residual_setting[1]
#         output_channel2 = _make_divisible(c2 * alpha, round_nearest)
#         for i in range(n2):
#             stride = s2 if i == 0 else 1
#             features2.append(block(c1, output_channel2, stride, expand_ratio=2))
#             c1 = output_channel2
#         self.features2 = nn.Sequential(*features2)
#
#         # 创建第三个倒残差层
#         features3 = []
#         t3, c3, n3, s3 = inverted_residual_setting[2]
#         output_channel3 = _make_divisible(c3 * alpha, round_nearest)
#         for i in range(n3):
#             stride = s3 if i == 0 else 1
#             features3.append(block(c2, output_channel3, stride, expand_ratio=2))
#             c2 = output_channel3
#         self.features3 = nn.Sequential(*features3)
#
#         # 创建第四个倒残差层
#         features4 = []
#         t4, c4, n4, s4 = inverted_residual_setting[3]
#         output_channel4 = _make_divisible(c4 * alpha, round_nearest)
#         for i in range(n4):
#             stride = s4 if i == 0 else 1
#             features4.append(block(c3, output_channel4, stride, expand_ratio=2))
#             c3 = output_channel4
#         self.features4 = nn.Sequential(*features4)
#
#         # 创建第5个倒残差层
#         features5 = []
#         t5, c5, n5, s5 = inverted_residual_setting[4]
#         output_channel5 = _make_divisible(c5 * alpha, round_nearest)
#         for i in range(n5):
#             stride = s5 if i == 0 else 1
#             features5.append(block(c4, output_channel5, stride, expand_ratio=2))
#             c4 = output_channel5
#         self.features5 = nn.Sequential(*features5)
#
#         # 创建第6个倒残差层
#         features6 = []
#         t6, c6, n6, s6 = inverted_residual_setting[5]
#         output_channel6 = _make_divisible(c6 * alpha, round_nearest)
#         for i in range(n6):
#             stride = s6 if i == 0 else 1
#             features6.append(block(c5, output_channel6, stride, expand_ratio=2))
#             c5 = output_channel6
#         self.features6 = nn.Sequential(*features6)
#
#         # 创建第7个倒残差层
#         features7 = []
#         t7, c7, n7, s7 = inverted_residual_setting[6]
#         output_channel7 = _make_divisible(c7 * alpha, round_nearest)
#         for i in range(n7):
#             stride = s7 if i == 0 else 1
#             features7.append(block(c6, output_channel7, stride, expand_ratio=2))
#             c6 = output_channel7
#         self.features7 = nn.Sequential(*features7)
#
#         if num_layers > 34:
#             self.num_ch_enc[1:] *= 4
#         # building last several layers
#         # features.append(ConvBNReLU(input_channel, last_channel, 1))
#         # combine feature layers
#         # self.features = nn.Sequential(*features)
#
#     def forward(self, x):
#
#         self.features = []  # 4个不同尺度的features
#         x = (x - 0.45) / 0.225
#         x = self.conv1(x)
#
#         x = self.bn1(x)
#         x = self.relu(x)
#
#         x1 = self.maxpool(x)
#
#         x1 = self.features1(x1)
#         # print(x1.shape)
#         x2 = self.features2(x1)
#         # print(x2.shape)
#         x3 = self.features3(x2)
#         # print(x3.shape)
#         x4 = self.features4(x3)
#         # print(x4.shape)
#         x5 = self.features5(x4)
#         # print(x5.shape)
#         x6 = self.features6(x5)
#         # print(x6.shape)
#         x7 = self.features7(x6)
#         # print(x7.shape)
#
#         self.features.append(x)
#         self.features.append(x1)
#         self.features.append(x3)
#         self.features.append(x5)
#         self.features.append(x7)
#
#         return self.features  # 4个不同尺度的features
