import torch
import torch.nn as nn
from torchviz import make_dot
from utils.base_util import get_parameter_number
from network.saconv import Bottleneck_SAC


class BasicBlock(nn.Module):
    """Basic feature block: 3x3 convolution -> 2x2 max-pooling -> tanh.

    Channels go from ``in_channel`` to ``out_channel``; because the pooling
    layer uses ``padding=1``, an H x W input comes out as
    ``floor(H/2)+1 x floor(W/2)+1`` (slightly larger than a plain halving).
    """

    def __init__(self, in_channel, out_channel, stride=1):
        super(BasicBlock, self).__init__()
        # 3x3 "same" convolution (padding=1); bias omitted by design.
        self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=out_channel,
                               kernel_size=3, stride=stride, padding=1, bias=False)
        # NOTE: padding=1 here makes the pooled size floor(H/2)+1, not H/2.
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2, padding=1)
        # relu is kept for attribute compatibility; forward() uses tanh only.
        self.relu = nn.ReLU()
        self.tanh = nn.Tanh()

    def forward(self, x):
        """Apply conv -> max-pool -> tanh and return the activation."""
        out = self.conv1(x)
        out = self.maxpool(out)
        out = self.tanh(out)
        return out


class BasicNet(nn.Module):
    """Small CNN: stem conv -> stack of blocks -> 4-layer tanh MLP head.

    Args:
        block: block class, instantiated as ``block(32, 32)`` for each layer.
        blocks_num: number of stacked blocks.
        in_channels: channel count of the input image.
        out_channels: length of the final output vector.
        fc1_in_channels: flattened feature size feeding the first FC layer.
            This depends on the input spatial size (e.g. 9248 for a
            1x128x128 input with ``BasicBlock`` and ``blocks_num=3``).
    """

    def __init__(self, block, blocks_num, in_channels, out_channels, fc1_in_channels):
        super(BasicNet, self).__init__()
        # Stem: lift the input to 32 feature channels (3x3, no padding).
        self.conv0 = nn.Conv2d(in_channels=in_channels, out_channels=32, kernel_size=(3, 3))
        # Stacked, channel-preserving feature blocks (32 -> 32 each).
        self.layer = self._make_layer(block, 32, blocks_num)
        # Fully connected head; tanh keeps each layer's output in [-1, 1].
        self.fc1 = nn.Linear(fc1_in_channels, 256)
        self.fc2 = nn.Linear(256, 256)
        self.fc3 = nn.Linear(256, 128)
        self.fc4 = nn.Linear(128, out_channels)
        self.tanh = nn.Tanh()

    def _make_layer(self, block, channel, block_num):
        """Stack ``block_num`` channel-preserving blocks into a Sequential."""
        layers = [block(channel, channel) for _ in range(block_num)]
        return nn.Sequential(*layers)

    def forward(self, x):
        """Run the stem, the block stack, then the tanh MLP head."""
        x = self.conv0(x)
        x = self.layer(x)
        # Flatten everything except the batch dimension for the FC head.
        hidden_x = torch.flatten(x, 1)
        x = self.tanh(self.fc1(hidden_x))
        x = self.tanh(self.fc2(x))
        x = self.tanh(self.fc3(x))
        x = self.tanh(self.fc4(x))
        return x


def basic_net(in_channels=1, out_channels=6, blocks_num=3, fc1_in_channels=9248):
    """Build a ``BasicNet`` made of ``BasicBlock`` layers.

    The defaults reproduce the original configuration: 3 blocks and a
    flattened feature size of 9248, which corresponds to a 1x128x128 input
    (128 -> 126 after the stem, then 64 -> 33 -> 17 spatially; 32*17*17 = 9248).
    For other input sizes pass a matching ``fc1_in_channels``.
    """
    return BasicNet(BasicBlock, blocks_num=blocks_num, in_channels=in_channels,
                    out_channels=out_channels, fc1_in_channels=fc1_in_channels)


if __name__ == '__main__':
    # Smoke test: build the default network, push a dummy 1x128x128 image
    # through it, and report the parameter count.
    model = basic_net(in_channels=1)
    # grad is enabled so the forward graph could be visualized with
    # torchviz's make_dot if needed during development.
    _x = torch.randn(1, 1, 128, 128).requires_grad_(True)
    y = model(_x)
    print(get_parameter_number(model))
