import torch
import torch.nn as nn
from torchviz import make_dot
from utils.base_util import get_parameter_number
from functools import partial
from torch import Tensor
from typing import List
from timm.models.layers import DropPath


class Partial_conv3(nn.Module):
    """Partial convolution: a 3x3 conv is applied to only the first
    dim // n_div channels; the remaining channels pass through untouched.
    """

    def __init__(self, dim, n_div, forward):
        super().__init__()
        # Leading channels that get convolved vs. trailing channels left as-is.
        self.dim_conv3 = dim // n_div
        self.dim_untouched = dim - self.dim_conv3
        self.partial_conv3 = nn.Conv2d(self.dim_conv3, self.dim_conv3, 3, 1, 1, bias=False)

        # Bind the requested forward implementation by name.
        if forward == 'slicing':
            self.forward = self.forward_slicing
        elif forward == 'split_cat':
            self.forward = self.forward_split_cat
        else:
            raise NotImplementedError

    def forward_slicing(self, x: Tensor) -> Tensor:
        """Inference-only variant: convolve the leading channels in place on a clone."""
        out = x.clone()  # keep the caller's tensor intact for the later residual
        out[:, :self.dim_conv3, :, :] = self.partial_conv3(out[:, :self.dim_conv3, :, :])
        return out

    def forward_split_cat(self, x: Tensor) -> Tensor:
        """Training/inference variant: split, convolve the first chunk, re-concatenate."""
        head, tail = torch.split(x, [self.dim_conv3, self.dim_untouched], dim=1)
        return torch.cat((self.partial_conv3(head), tail), dim=1)


class MLPBlock(nn.Module):
    """FasterNet block: partial-conv spatial mixing followed by a 1x1-conv MLP,
    joined to the input via a residual connection with stochastic depth and an
    optional per-channel LayerScale.

    Args:
        dim: number of input/output channels.
        n_div: channel divisor forwarded to Partial_conv3.
        mlp_ratio: hidden-dim expansion factor of the MLP.
        drop_path: stochastic-depth rate; 0 disables it (identity).
        layer_scale_init_value: if > 0, enables LayerScale with this init value.
        act_layer: activation constructor (e.g. nn.ReLU).
        norm_layer: normalization constructor (e.g. nn.BatchNorm2d).
        pconv_fw_type: 'slicing' or 'split_cat', forwarded to Partial_conv3.
    """

    def __init__(self,
                 dim,
                 n_div,
                 mlp_ratio,
                 drop_path,
                 layer_scale_init_value,
                 act_layer,
                 norm_layer,
                 pconv_fw_type
                 ):

        super().__init__()
        self.dim = dim
        self.mlp_ratio = mlp_ratio
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.n_div = n_div

        mlp_hidden_dim = int(dim * mlp_ratio)

        # Pointwise (1x1) expand -> norm -> activation -> pointwise project back.
        self.mlp = nn.Sequential(
            nn.Conv2d(dim, mlp_hidden_dim, 1, bias=False),
            norm_layer(mlp_hidden_dim),
            act_layer(),
            nn.Conv2d(mlp_hidden_dim, dim, 1, bias=False),
        )

        self.spatial_mixing = Partial_conv3(
            dim,
            n_div,
            pconv_fw_type
        )

        # LayerScale is only created (and the scaled forward bound) when enabled.
        # The previous `else: self.forward = self.forward` branch was a no-op
        # and has been removed.
        if layer_scale_init_value > 0:
            self.layer_scale = nn.Parameter(
                layer_scale_init_value * torch.ones(dim), requires_grad=True)
            self.forward = self.forward_layer_scale

    def forward(self, x: Tensor) -> Tensor:
        """Residual block: x + drop_path(mlp(spatial_mixing(x)))."""
        shortcut = x
        x = self.spatial_mixing(x)
        x = shortcut + self.drop_path(self.mlp(x))
        return x

    def forward_layer_scale(self, x: Tensor) -> Tensor:
        """Residual block with per-channel LayerScale applied to the MLP branch."""
        shortcut = x
        x = self.spatial_mixing(x)
        x = shortcut + self.drop_path(
            self.layer_scale.unsqueeze(-1).unsqueeze(-1) * self.mlp(x))
        return x


class PatchMerging(nn.Module):
    """Downsampling stage: a strided conv that doubles the channel count,
    followed by an optional normalization layer (identity when norm_layer is None).
    """

    def __init__(self, patch_size2, patch_stride2, dim, norm_layer):
        super().__init__()
        self.reduction = nn.Conv2d(dim, 2 * dim, kernel_size=patch_size2, stride=patch_stride2, bias=False)
        self.norm = norm_layer(2 * dim) if norm_layer is not None else nn.Identity()

    def forward(self, x: Tensor) -> Tensor:
        """Reduce spatial resolution, double channels, then normalize."""
        return self.norm(self.reduction(x))


class FasterBlock(nn.Module):
    """One FasterNet stage: an MLPBlock (partial-conv mixing + MLP residual)
    followed by a PatchMerging downsample that doubles channels and halves
    the spatial resolution.
    """

    def __init__(self, dim):
        super(FasterBlock, self).__init__()
        norm_layer = nn.BatchNorm2d
        act_layer = partial(nn.ReLU, inplace=True)
        self.MLPBlock = MLPBlock(dim=dim,
                                 n_div=4,
                                 mlp_ratio=2,
                                 drop_path=0.1,
                                 layer_scale_init_value=0,
                                 norm_layer=norm_layer,
                                 act_layer=act_layer,
                                 pconv_fw_type='split_cat')
        self.merge = PatchMerging(patch_size2=2,
                                  patch_stride2=2,
                                  dim=dim,
                                  norm_layer=norm_layer)

    def forward(self, x):
        """Mix features at the current resolution, then downsample."""
        return self.merge(self.MLPBlock(x))


class FasterNet(nn.Module):
    """Backbone: a stem conv, a stack of `block` stages with doubling channel
    widths, and a tanh-activated fully connected head.
    """

    def __init__(self, block, blocks_num, in_channels, out_channels, fc1_in_channels):
        super(FasterNet, self).__init__()
        # Stem: halve the spatial resolution and lift the input to 8 channels.
        self.conv0 = nn.Conv2d(in_channels, 8, kernel_size=2, stride=2, bias=False)
        # Stacked feature-extraction stages.
        self.layer = self._make_layer(block, 8, blocks_num)
        # Fully connected head.
        self.fc1 = nn.Linear(fc1_in_channels, 256)
        self.fc2 = nn.Linear(256, 256)
        self.fc3 = nn.Linear(256, 128)
        self.fc4 = nn.Linear(128, out_channels)
        self.tanh = nn.Tanh()

    def _make_layer(self, block, channel, block_num):
        """Build block_num stages; stage i receives channel * 2**i input channels."""
        stages = (block(channel * (2 ** i)) for i in range(block_num))
        return nn.Sequential(*stages)

    def forward(self, x):
        """Run the stem, the stages, then the tanh MLP head on flattened features."""
        feats = self.layer(self.conv0(x))
        out = torch.flatten(feats, 1)
        for fc in (self.fc1, self.fc2, self.fc3, self.fc4):
            out = self.tanh(fc(out))
        return out


def faster_net(in_channels=1, out_channels=6):
    """Factory for the default 3-stage FasterNet.

    The hard-coded fc1_in_channels=4096 matches a 128x128 input:
    stem halves to 64, three stages halve to 8 with 64 channels -> 64 * 8 * 8 / 8... i.e. 64 * 64 = 4096 flattened features.
    """
    return FasterNet(FasterBlock, 3,
                     in_channels=in_channels,
                     out_channels=out_channels,
                     fc1_in_channels=4096)


if __name__ == '__main__':
    model = faster_net()
    # Dummy input used to trace the computation graph.
    _x = torch.randn(1, 1, 128, 128).requires_grad_(True)
    y = model(_x)
    # Render the graph of the forward pass with torchviz.
    dot_graph = make_dot(y, params=dict(list(model.named_parameters()) + [('x', _x)]))
    dot_graph.format = "png"
    # Output directory for the rendered image.
    dot_graph.directory = "C:/Users/adminTKJ/Desktop/RLIR_sumup/assets/network_structure"
    # Write the file and open it.
    dot_graph.view()
    # Report the parameter count.
    print(get_parameter_number(model))
