"""
基于U-net的回归模型,只保留编码器部分
"""
import torch
import torch.nn as nn
from torchviz import make_dot
from utils.base_util import get_parameter_number


class BasicBlock(nn.Module):
    """A 3x3 convolution (padding=1) optionally followed by an in-place-free ReLU.

    Args:
        in_channels: number of input feature channels.
        out_channels: number of output feature channels.
        stride: convolution stride (spatial size is preserved when stride=1).
        is_relu: when True, apply ReLU to the conv output; when False,
            return the raw conv output (used for the last block of a stage
            so the caller can pick its own activation).
    """

    def __init__(self, in_channels, out_channels, stride=1, is_relu=True):
        super(BasicBlock, self).__init__()
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                              kernel_size=3, stride=stride, padding=1)
        self.relu = nn.ReLU()
        self.is_relu = is_relu

    def forward(self, x):
        out = self.conv(x)
        return self.relu(out) if self.is_relu else out


def _make_layer(in_channels, out_channels, layers_num):
    """Build a stage of ``layers_num`` stacked BasicBlocks.

    The first block maps ``in_channels -> out_channels``; the rest keep
    ``out_channels``.  The LAST block always omits its ReLU so callers
    (ImgEncoder/PosEncoder) can apply sigmoid/ReLU to the raw conv output.

    Fix: the original only disabled ReLU on the last block when
    ``layers_num > 1``; with ``layers_num == 1`` the single block kept its
    ReLU, making the one-layer stage inconsistent with deeper ones (the
    caller's sigmoid then saw relu(conv) instead of the raw conv output).

    Args:
        in_channels: channels entering the stage.
        out_channels: channels produced by every block in the stage.
        layers_num: number of BasicBlocks to stack (>= 1).

    Returns:
        nn.Sequential of the stacked blocks.
    """
    layers = []
    for i in range(layers_num):
        layers.append(BasicBlock(
            in_channels if i == 0 else out_channels,
            out_channels,
            is_relu=(i < layers_num - 1),  # last block: no ReLU
        ))
    return nn.Sequential(*layers)


class ImgEncoder(nn.Module):
    """Three-stage convolutional encoder over the image input.

    Each stage is a stack of 3x3 convs (see ``_make_layer``).  For every
    stage the sigmoid of its raw output is returned as a gating map, while
    the ReLU'd output, downsampled 2x by max-pooling, feeds the next stage.

    Args:
        in_channels: channels of the input image.
        scale: divisor applied to the base widths (64, 128, 256).
        layers_num: BasicBlocks per stage.
    """

    def __init__(self, in_channels=1, scale=1, layers_num=1):
        super(ImgEncoder, self).__init__()
        widths = [int(c / scale) for c in (64, 128, 256)]
        self.conv1 = _make_layer(in_channels, widths[0], layers_num)
        self.conv2 = _make_layer(widths[0], widths[1], layers_num)
        self.conv3 = _make_layer(widths[1], widths[2], layers_num)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)

    def forward(self, x):
        feat1 = self.conv1(x)
        feat2 = self.conv2(self.maxpool(self.relu(feat1)))
        feat3 = self.conv3(self.maxpool(self.relu(feat2)))
        # Sigmoid maps at full, 1/2 and 1/4 resolution, used as gates.
        return self.sigmoid(feat1), self.sigmoid(feat2), self.sigmoid(feat3)


class PosEncoder(nn.Module):
    """Encoder fusing a position map with image-derived gating maps.

    The position branch mirrors ImgEncoder's three stages; after each
    stage its ReLU'd features are gated (element-wise product) by the
    matching sigmoid map from the image encoder, then max-pooled before
    the next stage.  A final 1x1 conv collapses the fused features to a
    single channel.

    Args:
        img_channels: channels of the image input.
        pos_channels: channels of the position input.
        scale: divisor applied to the base widths (64, 128, 256).
        layers_num: BasicBlocks per stage.
    """

    def __init__(self, img_channels=1, pos_channels=2, scale=1, layers_num=1):
        super(PosEncoder, self).__init__()
        self.img_encoder = ImgEncoder(in_channels=img_channels, scale=scale, layers_num=layers_num)
        widths = [int(c / scale) for c in (64, 128, 256)]
        self.conv1 = _make_layer(pos_channels, widths[0], layers_num)
        self.conv2 = _make_layer(widths[0], widths[1], layers_num)
        self.conv3 = _make_layer(widths[1], widths[2], layers_num)
        # 1x1 conv: fuse the last stage down to one output channel.
        self.conv4 = nn.Conv2d(in_channels=widths[2], out_channels=1,
                               kernel_size=1, stride=1, padding=0)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)

    def forward(self, x, pos):
        gate1, gate2, gate3 = self.img_encoder(x)
        fused = self.relu(self.conv1(pos)) * gate1
        fused = self.relu(self.conv2(self.maxpool(fused))) * gate2
        fused = self.relu(self.conv3(self.maxpool(fused))) * gate3
        return self.conv4(fused)


class UBasicNet(nn.Module):
    """Regression head: PosEncoder features flattened into a 3-layer
    tanh MLP producing ``out_channels`` values in (-1, 1).

    Args:
        img_channels: channels of the image input.
        pos_channels: channels of the position input.
        out_channels: number of regression outputs.
        scale: width divisor forwarded to the encoder.
        conv2mlp: flattened size of the encoder output (must match the
            input spatial size; 32*32 for 128x128 inputs).
        mlp_num: hidden width of the MLP.
        layers_num: BasicBlocks per encoder stage.
    """

    def __init__(self, img_channels=1, pos_channels=2, out_channels=6,
                 scale=1, conv2mlp=32 * 32, mlp_num=128, layers_num=2):
        super(UBasicNet, self).__init__()
        self.encoder = PosEncoder(img_channels=img_channels, pos_channels=pos_channels,
                                  scale=scale, layers_num=layers_num)
        self.fc1 = nn.Linear(conv2mlp, mlp_num)
        self.fc2 = nn.Linear(mlp_num, mlp_num)
        self.fc3 = nn.Linear(mlp_num, out_channels)
        self.tanh = nn.Tanh()

    def forward(self, x, pos):
        feat = self.encoder(x, pos)
        feat = feat.view(feat.shape[0], -1)  # flatten per sample
        for fc in (self.fc1, self.fc2, self.fc3):
            feat = self.tanh(fc(feat))
        return feat


if __name__ == '__main__':

    # Smoke test: dummy 128x128 inputs for a forward pass and graph render.
    _x = torch.randn(1, 1, 128, 128).requires_grad_(True)    # dummy image input
    _pos = torch.randn(1, 2, 128, 128).requires_grad_(True)  # dummy position input
    _layers_num = 6
    model = UBasicNet(1, 2, layers_num=4, conv2mlp=32 * 32)
    y = model(_x, _pos)  # prediction for the 128x128 dummy batch
    # Build the computation graph visualization with torchviz.
    MyConvNetVis = make_dot(y, params=dict(list(model.named_parameters()) + [('x', _x)]))
    MyConvNetVis.format = "png"
    # Directory the rendered graph file is written to.
    MyConvNetVis.directory = "C:/Users/adminTKJ/Desktop/MainProject/RLIR_sumup/assets/network_structure"
    # Render and open the graph.
    MyConvNetVis.view()
    # Report the model's parameter count.
    print(get_parameter_number(model))
