import torch
import torch.nn as nn
import math
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from van_util import SE_block
from torchsummary import summary
from functools import partial
from tensorboardX import SummaryWriter


class DepthWiseConv(nn.Module):
    def __init__(self, input_size=768, kernel_size=3):
        """
        Depth-wise convolution: one kernel per channel, each channel convolved
        independently. Implemented via ``nn.Conv2d`` with ``groups=input_size``
        (groups splits the input channels; each group is convolved separately,
        with out_channels/groups filters per group).

        :param input_size: number of input channels (output channel count is
            the same; the name really means "input channel size").
        :param kernel_size: spatial kernel size; 3x3 in the original paper.
        """
        super(DepthWiseConv, self).__init__()
        self.input_size = input_size
        self.kernel_size = kernel_size
        # padding = kernel_size // 2 keeps H and W unchanged for any odd
        # kernel size; the original hard-coded padding=1 was only correct
        # for kernel_size=3.
        self.DWConv = nn.Conv2d(input_size, input_size, kernel_size, 1,
                                kernel_size // 2, groups=input_size, bias=True)

    def forward(self, x):
        """Apply the depth-wise conv; [N, C, H, W] shape is preserved for odd kernels."""
        x = self.DWConv(x)
        return x


class LKA(nn.Module):
    def __init__(self, input_size, kernel_size_dilation, dilation=3):
        """
        Large Kernel Attention: DW-Conv -> DW-D-Conv -> SE -> 1x1 Conv.

        A large K x K kernel is decomposed into a (2*d - 1) depth-wise conv,
        a ceil(K / d) depth-wise dilated conv (dilation d), and a 1x1 conv.
        The output gates the input element-wise, so channels and spatial size
        must be preserved throughout.

        :param input_size: channel count; input is [N, input_size, H, W] and
            the channel count does not change.
        :param kernel_size_dilation: kernel size of the dilated layer,
            ceil(1.0 * raw_kernel_size / dilation) in the paper.
        :param dilation: dilation rate of the second layer.
        """
        super(LKA, self).__init__()
        self.input_size = input_size
        self.kernel_size = kernel_size_dilation
        self.dilation = dilation
        # Derive shape-preserving paddings instead of hard-coding them.
        # The original padding=2 / padding=9 were only valid for dilation=3
        # with kernel_size_dilation=7 (Attention's default dilation=1 would
        # have changed the spatial size and broken the `out * x` product);
        # these formulas reproduce 2 and 9 for the defaults and stay correct
        # for other configurations.
        dw_kernel = 2 * dilation - 1
        dw_padding = dw_kernel // 2                              # == dilation - 1
        dwd_padding = (kernel_size_dilation - 1) * dilation // 2
        self.LKA_layer = nn.Sequential(
            nn.Conv2d(input_size, input_size, dw_kernel, padding=dw_padding,
                      groups=input_size),
            nn.Conv2d(input_size, input_size, kernel_size_dilation,
                      padding=dwd_padding, groups=input_size, dilation=dilation),
            SE_block(in_channel=input_size, ratio=16),
            nn.Conv2d(input_size, input_size, 1)
        )

    def forward(self, x):
        # Attention map gates the input element-wise.
        out = x.clone()
        out = self.LKA_layer(out)

        return out * x


class Attention(nn.Module):
    """Spatial attention: 1x1 conv -> GELU -> LKA -> 1x1 conv, wrapped in a
    residual connection (the residual form follows the reference code)."""

    def __init__(self, input_size, kernel_size_dilation, dilation=1):
        super(Attention, self).__init__()
        self.input_size = input_size
        self.Att = nn.Sequential(
            nn.Conv2d(input_size, input_size, 1),
            nn.GELU(),
            LKA(input_size, kernel_size_dilation, dilation),
            nn.Conv2d(input_size, input_size, 1),
        )

    def forward(self, x):
        # identity path plus the attention branch
        branch = self.Att(x.clone())
        return x + branch


class FFN(nn.Module):
    """Convolutional feed-forward block: 1x1 expand -> depth-wise 3x3 -> GELU
    -> dropout -> 1x1 project -> dropout.

    The reference code also exposes act_layer=nn.GELU; the activation is kept
    fixed here.
    """

    def __init__(self, input_size, hidden_size=0, output_size=0, dropout: float = 0.):
        """
        :param input_size: input channel count.
        :param hidden_size: expanded channel count; 0 means "same as input".
        :param output_size: output channel count; 0 means "same as input".
        :param dropout: dropout probability after activation and projection.
        """
        super(FFN, self).__init__()
        self.input_size = input_size
        self.dropout = dropout
        self.output_size = input_size if output_size == 0 else output_size
        self.hidden_size = input_size if hidden_size == 0 else hidden_size

        self.fnn = nn.Sequential(
            nn.Conv2d(self.input_size, self.hidden_size, 1),
            DepthWiseConv(self.hidden_size, 3),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Conv2d(self.hidden_size, self.output_size, 1),
            nn.Dropout(dropout),
        )

        # Weight initialization mirrors the reference VAN implementation.
        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Reference-style init: truncated normal for Linear, unit LayerNorm,
        fan-out-scaled normal for Conv2d; all biases zeroed."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            fan_out = (m.kernel_size[0] * m.kernel_size[1] * m.out_channels) // m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()

    def forward(self, x):
        return self.fnn(x)


class VAN_Block(nn.Module):
    def __init__(self, input_size, raw_kernel_size=21, dropout: float = 0., dilation=3, drop_path_rate: float = 0.,
                 fnn_ratio=4., ):
        """
        One VAN stage block: BN -> Attention (+residual), then BN -> FFN
        (+residual); each branch output is scaled by a learnable per-channel
        factor and passed through DropPath.

        :param input_size: channel count (unchanged by the block).
        :param raw_kernel_size: K, the large kernel being decomposed (21 in the paper).
        :param dropout: dropout used inside the FFN.
        :param dilation: dilation of the decomposed depth-wise conv.
        :param drop_path_rate: stochastic depth — for an input [B, C, H, W],
            a fraction drop_path_rate of the samples in a batch skip the
            branch and keep only the identity path.
            Reference: https://blog.csdn.net/qq_43426908/article/details/121662843
        :param fnn_ratio: expansion ratio of the FFN hidden layer (may be a float).
        :argument the reference code also exposes act_layer=nn.GELU
        """
        super(VAN_Block, self).__init__()
        self.input_size = input_size
        self.dropout = dropout
        self.drop_path = drop_path_rate
        self.dilation = dilation
        # kernel size of the dilated layer: ceil(K / d)
        self.kernel_size_dilation = math.ceil((1.0 * raw_kernel_size) / dilation)

        self.Attblock = nn.Sequential(
            nn.BatchNorm2d(input_size),
            Attention(input_size, self.kernel_size_dilation, dilation)
        )

        # nn.Identity() is a no-op placeholder used when stochastic depth is disabled.
        self._drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()

        # Bug fix: fnn_ratio defaults to a float (4.), so the product was a
        # float and nn.Conv2d rejects a non-int out_channels — cast to int.
        self.hidden_size = int(input_size * fnn_ratio)
        self.FNNblock = nn.Sequential(
            nn.BatchNorm2d(input_size),
            FFN(input_size, hidden_size=self.hidden_size, dropout=dropout),
        )

        # Layer scale: small learnable per-channel residual scaling, as in the
        # reference implementation.
        layer_scale_init_value = 1e-2
        self.layer_scale_1 = nn.Parameter(
            layer_scale_init_value * torch.ones(input_size), requires_grad=True)
        self.layer_scale_2 = nn.Parameter(
            layer_scale_init_value * torch.ones(input_size), requires_grad=True)

        self.apply(self._init_weights)

    def _init_weights(self, m):
        """
        Reference-style init: truncated normal for Linear, unit LayerNorm,
        fan-out-scaled normal for Conv2d; all biases zeroed.
        """
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            fan_out //= m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()

    def forward(self, x):
        # x + drop_path(layer_scale * branch(x)), applied twice:
        # attention branch first, then the FFN branch.
        out1 = x + self._drop_path(self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * self.Attblock(x))
        out2 = out1 + self._drop_path(self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * self.FNNblock(out1))
        return out2


class OverlapPatchEmbed(nn.Module):
    """Overlapping patch embedding.

    Similar to the Transformer PatchEmbed: a strided convolution with
    kernel_size = patch_size encodes local patches, except the kernel is
    larger than the stride so patches overlap. Followed by BatchNorm.
    Reference: https://blog.csdn.net/a486259/article/details/129402562
    """

    def __init__(self, img_size=224, patch_size=7, stride=4, in_channel=3, embed_dim=768):
        super(OverlapPatchEmbed, self).__init__()
        # to_2tuple normalizes a scalar patch size into an (h, w) pair so it
        # can be used uniformly below.
        patch_size = to_2tuple(patch_size)
        self.proj = nn.Conv2d(
            in_channel,
            embed_dim,
            kernel_size=patch_size,
            stride=stride,
            padding=(patch_size[0] // 2, patch_size[1] // 2),
        )
        self.norm = nn.BatchNorm2d(embed_dim)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Reference-style init: truncated normal for Linear, unit LayerNorm,
        # fan-out-scaled normal for Conv2d; all biases zeroed.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            fan_out = (m.kernel_size[0] * m.kernel_size[1] * m.out_channels) // m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()

    def forward(self, x):
        # Returns the embedded feature map plus its spatial size (H, W).
        projected = self.proj(x)
        _, _, H, W = projected.shape
        return self.norm(projected), H, W


class VanSE(nn.Module):
    def __init__(self, img_size=224, in_channel=3, num_classes=100, embed_dims=(64, 128, 256, 512),
                 ffn_ratio=(4, 4, 4, 4), drop_out=0., drop_path_rate=0., norm_layer=nn.LayerNorm,
                 depth=(3, 4, 6, 3), num_stage=4, flag=False):
        """
        :param img_size: input image size; images are usually resized to 224.
        :param in_channel: input image channels; 3 for RGB.
        :param num_classes: number of classes; CIFAR-100 has 100.
        :param embed_dims: output channel count of each stage.
        :param ffn_ratio: hidden-layer expansion ratio of the FFN, per stage.
        :param drop_out: dropout probability.
        :param drop_path_rate: stochastic-depth (DropPath) rate.
        :param norm_layer: normalization layer applied to the flattened
            [B, N, C] tokens after each stage (LayerNorm by default).
        :param depth: number of VAN_Blocks per stage.
        :param num_stage: number of stages.
        :param flag: when True, num_classes is not stored on the instance
            (kept from the reference code).

        Note: the defaults are tuples rather than lists to avoid shared
        mutable default arguments; they are only indexed, never mutated.
        """
        super(VanSE, self).__init__()
        if flag is False:
            self.num_classes = num_classes
        self.depth = depth
        self.drop_out = drop_out
        self.drop_path_rate = drop_path_rate
        self.num_stage = num_stage
        self.img_size = img_size
        self.in_channel = in_channel
        # Stored so reset_classifier() can rebuild the head later; the
        # original referenced a non-existent self.embed_dim there.
        self.embed_dims = embed_dims

        # Stochastic depth decay rule: drop-path rate increases linearly
        # across all blocks of all stages.
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depth))]
        cur = 0

        # setattr(object, name, value) registers each stage's submodules under
        # numbered attribute names (patch_embed1, block1, norm1, ...), matching
        # the getattr lookups in forward().
        for i in range(num_stage):
            patch_embed = OverlapPatchEmbed(img_size=img_size if i == 0 else img_size // (2 ** (i + 1)),
                                            patch_size=7 if i == 0 else 3,
                                            stride=4 if i == 0 else 2,
                                            in_channel=in_channel if i == 0 else embed_dims[i - 1],
                                            embed_dim=embed_dims[i])

            block = nn.ModuleList([VAN_Block(
                input_size=embed_dims[i], fnn_ratio=ffn_ratio[i], dropout=drop_out, drop_path_rate=dpr[cur + j],
                raw_kernel_size=21)
                for j in range(depth[i])])
            norm = norm_layer(embed_dims[i])
            cur += depth[i]

            setattr(self, f"patch_embed{i + 1}", patch_embed)
            setattr(self, f"block{i + 1}", block)
            setattr(self, f"norm{i + 1}", norm)

        # Classification head on the last stage's channel count.
        self.output = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity()
        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Reference-style init: truncated normal for Linear, unit LayerNorm,
        fan-out-scaled normal for Conv2d; all biases zeroed."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            fan_out //= m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()

    ######

    # The following helpers are carried over from the reference code
    # (mainly used by training/testing scripts).
    def freeze_patch_emb(self):
        """Freeze the first patch-embedding stage."""
        self.patch_embed1.requires_grad = False

    @torch.jit.ignore
    def no_weight_decay(self):
        """Parameter names excluded from weight decay (from the reference code)."""
        return {'pos_embed1', 'pos_embed2', 'pos_embed3', 'pos_embed4', 'cls_token'}  # has pos_embed may be better

    def get_classifier(self):
        """Return the classification head."""
        return self.output

    def reset_classifier(self, num_classes, global_pool=''):
        """Replace the classification head for a new number of classes."""
        self.num_classes = num_classes
        # Bug fix: the original used self.embed_dim, which was never defined
        # and raised AttributeError; use the stored per-stage dims instead.
        self.output = nn.Linear(self.embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity()

    ######
    def forward(self, x):
        # Per stage: patch_embed -> depth[i] VAN_Blocks -> flatten to
        # [B, N, C] -> norm; between stages the tokens are reshaped back to
        # [B, C, H, W] for the next patch embedding.
        batch_size = x.shape[0]

        for i in range(self.num_stage):
            patch_embed = getattr(self, f"patch_embed{i + 1}")
            block = getattr(self, f"block{i + 1}")
            norm = getattr(self, f"norm{i + 1}")
            x, H, W = patch_embed(x)
            for blk in block:
                x = blk(x)
            x = x.flatten(2).transpose(1, 2)
            x = norm(x)
            if i != self.num_stage - 1:
                # Reference: https://blog.csdn.net/kdongyi/article/details/108180250
                x = x.reshape(batch_size, H, W, -1).permute(0, 3, 1, 2).contiguous()

        # Global average pool over tokens, then the linear head.
        x = x.mean(dim=1)
        x = self.output(x)
        return x


if __name__ == "__main__":
    # Placeholder entry point; the model definitions above are meant to be imported.
    pass
