"""
第二种融合思路：在卷积层的基础上，添加输入门机制的通道融合方式，主要的作用是保留高分辨率的特征到FC层当中
"""
import torch
import torch.nn as nn
from torchviz import make_dot
from utils.base_util import get_parameter_number
from network.lstm import Lstm


class F2Block(nn.Module):
    """Conv building block with a high-resolution side branch.

    Main path: 3x3 conv -> 2x2 max-pool -> tanh (spatial size halved).
    Side branch: 3x3 conv -> tanh, flattened to (N, C*H*W) so the
    high-resolution features can feed a fully-connected layer directly.
    """

    def __init__(self, in_channel, mid_channel, out_channel, stride=1):
        super(F2Block, self).__init__()
        # Main-path conv keeps spatial size (kernel 3, padding 1).
        self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=out_channel,
                               kernel_size=3, stride=stride, padding=1, bias=False)
        # Side-branch conv with a (typically small) mid_channel width.
        self.conv2 = nn.Conv2d(in_channels=in_channel, out_channels=mid_channel,
                               kernel_size=3, stride=stride, padding=1, bias=False)
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.tanh = nn.Tanh()

    def forward(self, x):
        # Side branch: conv -> tanh, then flatten all but the batch dim.
        branch = self.tanh(self.conv2(x))
        # Main path: conv -> pool -> tanh.
        main = self.tanh(self.maxpool(self.conv1(x)))
        return main, torch.flatten(branch, 1)


class F2Net(nn.Module):
    """Fusion network (variant 2).

    A conv backbone of F2Blocks where each block also exports a flattened
    high-resolution feature map; those features are fused via LSTM-style
    input gates so fine detail reaches the final output.

    Args:
        in_channels: number of channels of the input image (spatial size is
            expected to be 128x128 given the hard-coded FC widths).
        out_channels: dimensionality of the network output. Previously the
            value 6 was hard-coded everywhere; it now follows this parameter
            (default 6, so existing callers see identical behavior).
        fc1_in_channels: unused; kept only for backward compatibility with
            existing callers.
    """

    def __init__(self, in_channels, out_channels=6, fc1_in_channels=30720):
        super(F2Net, self).__init__()
        self.conv0 = nn.Conv2d(in_channels=in_channels, out_channels=32, kernel_size=(3, 3),
                               padding=1, bias=False)
        # Convolutional backbone; each layer name encodes its input resolution.
        self.layer128 = F2Block(32, 1, 32)
        self.layer64 = F2Block(32, 2, 32)
        self.layer32 = F2Block(32, 4, 32)
        self.layer16 = nn.Conv2d(in_channels=32, out_channels=8, kernel_size=(3, 3),
                                 padding=1, bias=False)
        # One FC head per resolution, each mapping its flattened feature map
        # to the output width.
        self.fc128 = nn.Linear(1 * 128 * 128, out_channels)
        self.fc64 = nn.Linear(2 * 64 * 64, out_channels)
        self.fc32 = nn.Linear(4 * 32 * 32, out_channels)
        self.fc16 = nn.Linear(8 * 16 * 16, out_channels)
        self.tanh = nn.Tanh()

        # Input gates fusing each resolution's features with the running state.
        # NOTE(review): the first/last Lstm arguments are assumed to be the
        # gate's input/output widths (they match the FC output width) —
        # confirm against network.lstm.Lstm.
        self.input_gate_t1 = Lstm(out_channels, 4 * 32 * 32, out_channels)
        self.input_gate_t2 = Lstm(out_channels, 2 * 64 * 64, out_channels)
        self.input_gate_t3 = Lstm(out_channels, 1 * 128 * 128, out_channels)

    def forward(self, x):
        x = self.conv0(x)
        # Each block returns (downsampled main path, flattened side features).
        x, mid128 = self.layer128(x)
        x, mid64 = self.layer64(x)
        x, mid32 = self.layer32(x)
        x = self.layer16(x)
        hidden_x = torch.flatten(x, 1)
        # Per-resolution FC projections.
        x16 = self.fc16(hidden_x)
        x32 = self.fc32(mid32)
        x64 = self.fc64(mid64)
        x128 = self.fc128(mid128)
        # LSTM-style input gates, coarse to fine. Call the modules directly
        # (not .forward) so nn.Module hooks are honored.
        x = self.input_gate_t1(x32, mid32, x16)
        x = self.input_gate_t2(x64, mid64, x)
        x = self.input_gate_t3(x128, mid128, x)
        x = self.tanh(x)
        return x


if __name__ == '__main__':
    model = F2Net(in_channels=3, out_channels=6)
    # Dummy input with grad enabled so the autograd graph can be traced.
    dummy = torch.randn(1, 3, 128, 128).requires_grad_(True)
    prediction = model(dummy)
    # Build a torchviz visualization of the computation graph.
    graph = make_dot(prediction, params=dict(list(model.named_parameters()) + [('x', dummy)]))
    graph.format = "png"
    # Directory where the rendered file is written.
    graph.directory = "C:/Users/adminTKJ/Desktop/RLIR_sumup/assets/network_structure"
    # Render and open the file.
    graph.view()
    # Report the model's parameter count.
    print(get_parameter_number(model))
