# ------------------------------------------------------------------------------
# Copyright (c) HQU
# Licensed under the HQU License.
# Written by Wang Youjije (youjieWang@stu.hqu.edu.cn)
# Modified by Wang Youjije (youjieWang@stu.hqu.edu.cn)
# ------------------------------------------------------------------------------
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial

from torch import Tensor

from models.AttentionBlocks import E_ECABasicBlock, E_ECABottleneck

from timm.models.layers import DropPath, to_2tuple, trunc_normal_

import logging
import os
import copy

BN_MOMENTUM = 0.1
# module-level logger
logger = logging.getLogger(__name__)  # __name__ identifies the importing module in log records

# maps config block names to the channel-attention residual block classes
blocks_dict = {
    'BASIC': E_ECABasicBlock,
    'BOTTLENECK': E_ECABottleneck
}

# TODO: clean up the model interface later


class Mlp(nn.Module):
    """Position-wise feed-forward network: fc1 -> activation -> fc2 -> dropout."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super(Mlp, self).__init__()
        # fall back to in_features when the hidden/output widths are not given
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()  # activation layer
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        """Apply the two linear layers with the activation in between, then dropout."""
        hidden = self.act(self.fc1(x))
        return self.drop(self.fc2(hidden))


class Attention(nn.Module):
    """Multi-head self-attention with optional spatial reduction (SRA, PVT-style).

    When sr_ratio > 1 the key/value tokens are computed from a spatially
    downsampled copy of the input, reducing the attention cost.
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., sr_ratio=1):
        '''
        :param dim: token feature dimension
        :param num_heads: number of attention heads
        :param qkv_bias: whether the q/k/v projections use a bias
        :param qk_scale: override for the attention scale (defaults to head_dim ** -0.5)
        :param attn_drop: dropout applied to the attention weights
        :param proj_drop: dropout applied after the output projection
        :param sr_ratio: spatial-reduction ratio used to shrink the key/value tokens
        '''
        super(Attention, self).__init__()
        self.dim = dim
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5  # normalize q·k products by sqrt(head_dim)

        self.q = nn.Linear(dim, dim, bias=qkv_bias)  # query projection
        self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)  # fused key/value projection
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)  # output projection
        self.proj_drop = nn.Dropout(proj_drop)

        self.sr_ratio = sr_ratio
        if sr_ratio > 1:
            # strided conv that downsamples the feature map before the kv projection
            self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
            self.norm = nn.LayerNorm(dim)

    def forward(self, x, H, W):
        """x: (N, B, C) token sequence with N == H * W. Returns a (N, B, C) sequence."""
        x = x.permute(1, 0, 2)  # -> (B, N, C)
        B, N, C = x.shape

        # project queries and split into heads: (B, num_heads, N, head_dim)
        q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)

        if self.sr_ratio > 1:
            # sequence -> feature map, downsample with the strided conv, back to a sequence
            x_ = x.permute(0, 2, 1).reshape(B, C, H, W)
            x_ = self.sr(x_).reshape(B, C, -1).permute(0, 2, 1)
            x_ = self.norm(x_)
            kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        else:
            # no spatial reduction: keys/values come straight from x
            kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        k, v = kv[0], kv[1]

        attn = (q @ k.transpose(-2, -1)) * self.scale  # scaled dot-product attention scores
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)  # merge the heads back
        x = self.proj(x)
        x = self.proj_drop(x)
        x = x.permute(1, 0, 2)  # back to (N, B, C)

        return x


# Transformer encoder block; this part is mostly standard boilerplate adapted from existing implementations.
class Block(nn.Module):
    """Pre-norm Transformer encoder block: attention and MLP sub-layers, each
    wrapped in a residual connection with optional stochastic depth (DropPath).
    """

    def __init__(self, dim, num_heads, mlp_ratio=4, qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1):
        '''
        :param dim: token feature dimension
        :param norm_layer: normalization layer constructor
        '''
        super(Block, self).__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim,
            num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
            attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)
        # DropPath drops the whole residual branch (stochastic depth);
        # nn.Identity is used when the rate is zero so forward stays a no-op.
        self.drop_path = nn.Identity() if drop_path <= 0. else DropPath(drop_path)
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio),
                       act_layer=act_layer, drop=drop)

    def forward(self, x, H, W):
        # attention sub-layer: norm -> attention -> drop-path -> residual add
        x = x + self.drop_path(self.attn(self.norm1(x), H, W))
        # feed-forward sub-layer: norm -> MLP -> drop-path -> residual add
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x


class PatchEmbed(nn.Module):
    """ Image to Patch Embedding.

    Splits an image into non-overlapping patches with a strided conv,
    flattens the patches into a token sequence, and applies LayerNorm.
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        '''
        :param img_size: input image size (int or (H, W))
        :param patch_size: patch size (int or (h, w)); must divide img_size
        :param in_chans: number of input image channels
        :param embed_dim: embedding dimension of each patch token
        '''
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)

        self.img_size = img_size
        self.patch_size = patch_size
        assert img_size[0] % patch_size[0] == 0 and img_size[1] % patch_size[1] == 0, \
            f"img_size {img_size} should be divided by patch_size {patch_size}."
        # token-grid size and total token count
        self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1]
        self.num_patches = self.H * self.W
        # one conv step per patch: kernel == stride == patch size
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.norm = nn.LayerNorm(embed_dim)

    def forward(self, x):
        """x: (B, C, H, W) image. Returns ((B, H'*W', embed_dim) tokens, (H', W'))."""
        B, C, H, W = x.shape
        # (B, C, H, W) -> (B, embed_dim, H', W') -> (B, H'*W', embed_dim)
        # NOTE: the conv now runs once; the original computed it twice and
        # printed intermediate shapes for debugging.
        x = self.proj(x).flatten(2).transpose(1, 2)
        x = self.norm(x)
        H, W = H // self.patch_size[0], W // self.patch_size[1]

        return x, (H, W)


class TransformerDecoder(nn.Module):
    """Stack of cloned decoder layers with an optional final norm.

    When return_intermediate is True, the (normed) output of every layer is
    stacked and returned; otherwise only the final output is returned with a
    leading singleton dimension.
    """

    def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
        super().__init__()
        self.layers = _get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm
        self.return_intermediate = return_intermediate

    def forward(self, tgt, memory,
                tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None,
                query_pos: Optional[Tensor] = None):
        out = tgt
        intermediate = []

        # run the target through every cloned layer in order
        for decoder in self.layers:
            out = decoder(out, memory, tgt_mask=tgt_mask,
                          memory_mask=memory_mask,
                          tgt_key_padding_mask=tgt_key_padding_mask,
                          memory_key_padding_mask=memory_key_padding_mask,
                          pos=pos, query_pos=query_pos)
            if self.return_intermediate:
                intermediate.append(self.norm(out))

        if self.norm is not None:
            out = self.norm(out)
            if self.return_intermediate:
                # replace the last (already normed) entry with the final output
                intermediate[-1] = out

        if self.return_intermediate:
            return torch.stack(intermediate)

        return out.unsqueeze(0)


# Transformer decoder layer
# A single decoder layer is sufficient for this model.
class TransformerDecoderLayer(nn.Module):
    """Single DETR-style Transformer decoder layer.

    Self-attention over the queries, cross-attention to the encoder memory,
    then a feed-forward network; `normalize_before` selects pre-norm
    (forward_pre) versus post-norm (forward_post) ordering.
    """

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False):
        '''
        :param d_model: token feature dimension
        :param nhead: number of attention heads
        :param dim_feedforward: hidden width of the feed-forward network
        :param dropout: dropout probability used throughout the layer
        :param activation: name of the feed-forward activation (e.g. "relu")
        :param normalize_before: use pre-norm ordering instead of post-norm
        '''
        super(TransformerDecoderLayer, self).__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        """Add the positional encoding to `tensor`; no-op when pos is None."""
        return tensor if pos is None else tensor + pos

    def forward_post(self, tgt, memory,
                     tgt_mask: Optional[Tensor] = None,
                     memory_mask: Optional[Tensor] = None,
                     tgt_key_padding_mask: Optional[Tensor] = None,
                     memory_key_padding_mask: Optional[Tensor] = None,
                     pos: Optional[Tensor] = None,
                     query_pos: Optional[Tensor] = None):
        # queries and keys carry the positional encoding; the value does not
        q = k = self.with_pos_embed(tgt, query_pos)
        # BUGFIX: was `vlaue=tgt` (typo) which raised a TypeError at call time
        tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
                              key_padding_mask=tgt_key_padding_mask)[0]
        # BUGFIX: the self-attention residual uses dropout1 (dropout2 was
        # reused here), matching forward_pre and the DETR reference
        tgt = tgt + self.dropout1(tgt2)
        tgt = self.norm1(tgt)

        # cross-attention: queries from the decoder, keys/values from memory
        tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
                                   key=self.with_pos_embed(memory, pos),
                                   value=memory, attn_mask=memory_mask,
                                   key_padding_mask=memory_key_padding_mask)[0]
        tgt = tgt + self.dropout2(tgt2)
        tgt = self.norm2(tgt)

        # feed-forward network with its own residual
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
        tgt = tgt + self.dropout3(tgt2)
        tgt = self.norm3(tgt)
        return tgt

    def forward_pre(self, tgt, memory,
                    tgt_mask: Optional[Tensor] = None,
                    memory_mask: Optional[Tensor] = None,
                    tgt_key_padding_mask: Optional[Tensor] = None,
                    memory_key_padding_mask: Optional[Tensor] = None,
                    pos: Optional[Tensor] = None,
                    query_pos: Optional[Tensor] = None):
        # pre-norm variant: normalize before each sub-layer
        tgt2 = self.norm1(tgt)
        q = k = self.with_pos_embed(tgt2, query_pos)
        tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
                              key_padding_mask=tgt_key_padding_mask)[0]
        tgt = tgt + self.dropout1(tgt2)
        tgt2 = self.norm2(tgt)
        tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
                                   key=self.with_pos_embed(memory, pos),
                                   value=memory, attn_mask=memory_mask,
                                   key_padding_mask=memory_key_padding_mask)[0]
        tgt = tgt + self.dropout2(tgt2)
        tgt2 = self.norm3(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
        tgt = tgt + self.dropout3(tgt2)
        return tgt

    def forward(self, tgt, memory,
                tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None,
                query_pos: Optional[Tensor] = None):
        """Dispatch to the pre-norm or post-norm implementation."""
        if self.normalize_before:
            return self.forward_pre(tgt, memory, tgt_mask, memory_mask,
                                    tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
        return self.forward_post(tgt, memory, tgt_mask, memory_mask,
                                 tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)



# GCN-style factorized-convolution block
class GCNBlock(nn.Module):
    """Two-path factorized-convolution block.

    Path 1 applies a 3x1 then a 1x3 conv; path 2 applies a 1x3 then a 3x1
    conv. Each path is batch-normalized, the two are summed, and the result
    passes through ReLU.
    """

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(GCNBlock, self).__init__()
        # path 1: vertical (3x1) conv followed by horizontal (1x3) conv
        self.conv1_1 = nn.Conv2d(inplanes, planes, kernel_size=(3, 1), stride=(1, 1), padding=(1, 0))
        self.conv1_2 = nn.Conv2d(planes, planes, kernel_size=(1, 3), stride=(1, 1), padding=(0, 1))
        # path 2: horizontal (1x3) conv followed by vertical (3x1) conv
        self.conv2_1 = nn.Conv2d(inplanes, planes, kernel_size=(1, 3), stride=(1, 1), padding=(0, 1))
        self.conv2_2 = nn.Conv2d(planes, planes, kernel_size=(3, 1), stride=(1, 1), padding=(1, 0))
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # path 1: 3x1 -> 1x3 -> BN
        path1 = self.bn1(self.conv1_2(self.conv1_1(x)))
        # path 2: 1x3 -> 3x1 -> BN
        path2 = self.bn2(self.conv2_2(self.conv2_1(x)))
        # assemble: element-wise sum of the two paths, then activation
        return self.relu(path1 + path2)






# TODO 1. Build the _make_layer branches, then take the intermediate results from their outputs
# 2. The number of _make_layer branches can be specified through the config
# 3. Finally fuse the features taken from every _make_layer


class CNT(nn.Module):
    """One CNN-Transformer hybrid stage.

    The feature map is passed through several resolution branches of
    channel-attention CNN blocks; the branch outputs (plus a 1x1-conv
    "positional" summary of each branch) are upsampled and fused back onto
    the highest-resolution branch, flattened into a token sequence, added to
    the incoming sequence together with a learned positional embedding, and
    finally processed by one Transformer encoder Block.
    """

    def __init__(self, block, image_size: list, num_inchannels: list, layers: list, k_size: list, fuse_method,
                 embed_dim: int, num_heads, mlp_ratios, qkv_bias=False, qk_scale=None, drop_rate=0.,
                 attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm,
                 depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1]):
        # TODO: embed_dim is not final yet; it should match the channel count of the input
        '''
        :param block: CNN block class to use (E_ECABasicBlock or E_ECABottleneck)
        :param image_size: (height, width) of the feature map entering this stage
        :param num_inchannels: list of channel counts, one per resolution branch
        :param layers: number of blocks in each branch (a list, read from the config file)
        :param k_size: per-branch kernel size for the channel-attention module (3 by default)
        :param fuse_method: name of the feature-fusion method
        :param embed_dim: Transformer token dimension (should equal the channel count C)
        :param drop_rate: dropout probability
        :param num_heads: number of heads in the multi-head attention
        :param depths: apparently unused here
        '''

        super(CNT, self).__init__()
        self.inplanes = 48
        self.num_inchannels = num_inchannels
        self.fuse_method = fuse_method
        self.depths = depths
        # learnable fusion weight (currently disabled)
        # self.gamma = nn.Parameter(torch.randn(num_inchannels[0]))
        self.layer = []  # holds the conv stack of each resolution branch
        for i in range(len(self.num_inchannels)):  # one branch per entry in num_inchannels
            if i == 0:  # the first branch keeps the input resolution (no downsampling)
                self.layer.append(self._make_layer(block, num_inchannels[i], layers[i], k_size=k_size[i]))
            else:
                self.layer.append(self._make_layer(block, num_inchannels[i], layers[i], k_size=k_size[i], stride=2))
        self.layers = nn.ModuleList(self.layer)

        # --------------------------- extra feature-fusion part ---------------------------
        self.UpSam = []
        for i in range(len(self.num_inchannels)):
            if i == 0:
                self.UpSam.append(None)
            else:
                self.UpSam.append(nn.Upsample(scale_factor=2 ** i, mode='nearest'))  # scale_factor: output size = input size * factor
        self.UpSams = nn.ModuleList(self.UpSam)

        self.conv3x3_1 = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv3x3_2 = nn.Conv2d(in_channels=num_inchannels[0], out_channels=num_inchannels[0], kernel_size=3, stride=1, padding=1, bias=False)
        self.conv3x3_3 = nn.Conv2d(in_channels=num_inchannels[0], out_channels=num_inchannels[0], kernel_size=3, stride=1, padding=1, bias=False)
        # 1x1 convs that extract positional information from each branch
        self.conv1x1_pos_layers = self._make_conv1x1_layers()
        # -------------------------------------------------------------

        self.fuse_layers = self._make_fuse_layers()  # layers that merge every branch back onto branch 0

        self.relu = nn.ReLU(inplace=True)
        self.elu = nn.ELU(alpha=1.0, inplace=True)
        self.norm = nn.LayerNorm(embed_dim)

        # TODO pos_embed is a plain learnable Parameter; unclear how this compares with other positional-encoding schemes
        self.pos_embed = nn.Parameter(torch.zeros( image_size[0] * image_size[1], 1, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)

        # transformer encoder
        # NOTE(review): stochastic depth (drop_path) is not wired up yet, hence drop_path=0
        self.block = Block(
            dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratios, qkv_bias=qkv_bias, qk_scale=qk_scale,
            drop=drop_rate, attn_drop=attn_drop_rate, drop_path=0, norm_layer=norm_layer,
            sr_ratio=sr_ratios[0]
        )

        # initialize the positional embedding
        trunc_normal_(self.pos_embed, std=.02)


    def _make_layer(self, block, planes, blocks, k_size, stride=1):
        """Build one resolution branch: `blocks` channel-attention CNN blocks in sequence."""
        downsample = None
        # downsample when stride != 1, or when the block changes the channel count
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        # only the first block of each stage downsamples
        layers.append(block(self.inplanes, planes, stride, downsample, k_size))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, k_size=k_size))

        return nn.Sequential(*layers)

    def _make_fuse_layers(self):
        """For every branch j > 0, build 1x1 conv + BN + upsample mapping it to branch 0's channels and resolution."""
        if len(self.num_inchannels) == 1:
            return None

        num_inchannels = self.num_inchannels
        fuse_layers = []

        for j in range(len(num_inchannels)):
            # j is the branch currently being processed
            if j == 0:
                fuse_layers.append(None)
            elif j > 0:
                # upsample branch j back to branch 0's resolution
                fuse_layers.append(
                    nn.Sequential(
                        nn.Conv2d(
                            num_inchannels[j],
                            num_inchannels[0],
                            1, 1, 0, bias=False
                        ),
                        nn.BatchNorm2d(num_inchannels[0]),
                        nn.Upsample(scale_factor=2 ** (j - 0), mode='nearest')  # scale_factor: output size = input size * factor
                    )
                )
            else:
                # unreachable: j is never negative
                pass

        return nn.ModuleList(fuse_layers)

    # --------- extra positional-feature fusion -----------------------------
    def _make_conv1x1_layers(self):
        """Per branch, a bias-free 1x1 conv squeezing the branch down to a single 'position' channel."""
        num_inchannels = self.num_inchannels
        conv1x1_layers = []

        for j in range(len(num_inchannels)):
            # j is the branch currently being processed
            conv1x1_layers.append(nn.Sequential(
                nn.Conv2d(
                    num_inchannels[j],
                    1,
                    1, 1, 0, bias=False
                )
            ))
        return nn.ModuleList(conv1x1_layers)

    def forward(self, x_s, x_f):
        '''
        Forward pass.
        :param x_s: token sequence (Transformer input); presumably shaped (H*W, B, C) — TODO confirm against caller
        :param x_f: CNN feature map, input for this stage's CNN branches
        :return: (token sequence after the Transformer block, fused feature map for the next stage)
        '''
        # 1. feed the feature map through the channel-attention CNN branches
        # 2. multi-stage downsampling, then upsampling + feature fusion
        # 3. flatten the fused map into a sequence
        # 4. fuse it with x_s and add the positional embedding
        # 5. run the result through the Transformer encoder block
        B, C, H, W = x_f.shape
        layer_feature = []

        # per-branch positional features produced by the 1x1 convs
        conv1x1_pose_feature = []


        for i in range(len(self.num_inchannels)):
            x_f = self.layers[i](x_f)
            layer_feature.append(x_f)

        # extract the 1x1 positional features
        for i in range(len(self.num_inchannels)):
            conv1x1_f = self.conv1x1_pos_layers[i](layer_feature[i])
            conv1x1_pose_feature.append(conv1x1_f)

        # --------------- feature fusion (v1) --------------------
        # fuse the different-resolution features by element-wise addition
        for i in range(len(self.fuse_layers)):
            if i == 0:
                x_fuse = layer_feature[0]  # self.fuse_layers[0] is None, so branch 0 is taken as-is
            else:
                x_fuse = x_fuse + self.fuse_layers[i](layer_feature[i])

        # fuse the positional information of the different resolutions
        for i in range(len(self.conv1x1_pos_layers)):
            if i == 0:
                x_pos_fuse = conv1x1_pose_feature[i]
            else:
                x_pos_fuse = x_pos_fuse + self.UpSams[i](conv1x1_pose_feature[i])

        # one extra 3x3 conv on each fused map
        x_pos_fuse = self.conv3x3_1(x_pos_fuse)
        x_pos_fuse = self.relu(x_pos_fuse)
        x_fuse = self.conv3x3_2(x_fuse)
        x_fuse = self.relu(x_fuse)

        # modulate the fused features with the positional map, element-wise
        x_element_wise = x_fuse * x_pos_fuse

        x_element_wise = self.conv3x3_3(x_element_wise)


        x_fuse_out = self.relu(x_element_wise)  # (B, C, H, W)
        # CNN part done; flatten into a (H*W, B, C) sequence for the Transformer
        x_fuse_out = x_fuse_out.flatten(2).permute(2, 0, 1)
        # ----------------------------------------

        # fuse with the incoming token sequence
        x_out = x_s + x_fuse_out
        x_out = self.norm(x_out)
        # add the positional embedding
        x_out = x_out + self.pos_embed
        x_out = self.pos_drop(x_out)  # TODO: possibly redundant; kept from the original design

        x_out = self.block(x_out, H, W)
        # NOTE(review): the returned x_fuse is the post-ReLU fused map from
        # conv3x3_2, not the positional-modulated x_fuse_out — confirm intended
        return x_out, x_fuse


class HRCNT(nn.Module):
    """High-Resolution CNN-Transformer network for human pose estimation.

    Pipeline (as implemented in ``forward``):
      stem (two stride-2 3x3 convs, full -> 1/4 resolution)
      -> layer1 (4 x E_ECABottleneck)
      -> transition1 (channel change)
      -> stage2/3/4 (CNT modules that carry both a sequence view ``x_s``
         and a feature-map view ``x``)
      -> per-resolution heatmap conv heads plus deconv refinement,
         producing a list of heatmap tensors.
    """

    def __init__(self, cfg, depths=(3, 4, 6, 3), k_size=(3, 3, 3, 3), **kwargs):
        """
        :param cfg: experiment configuration (yacs-style node: the code below
            uses both attribute access and item access, so cfg must support both).
        :param depths: kept for interface compatibility; currently unused —
            stage depths come from ``cfg`` instead.
        :param k_size: kept for interface compatibility; currently unused.
        """
        self.inplanes = 64
        extra = cfg['MODEL']['EXTRA']
        # Read (but currently unused): keeps the config contract of the
        # disabled transformer-decoder head. TODO confirm it is still wanted.
        transformer_decoder = extra['TRANSFORMER_DECODER']

        super(HRCNT, self).__init__()

        # Stem: two stride-2 convs take the input from full resolution to 1/4.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1,
                               bias=True)
        self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        # Stage 1: four E_ECABottleneck blocks (64 -> 256 channels).
        self.layer1 = self._make_layer(E_ECABottleneck, 64, 4)

        # Stage 2. A transition layer first aligns the channel counts.
        self.stage2_cfg = extra['STAGE2']
        num_channels = self.stage2_cfg['NUM_CHANNELS']  # e.g. [48, 96]
        block = blocks_dict[self.stage2_cfg['BLOCK']]  # BASIC or BOTTLENECK
        # BASIC blocks keep the channel count (expansion == 1); BOTTLENECK
        # multiplies the output channels by 4 (expansion == 4).
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))
        ]
        logger.info('stage2 num_channels: %d', len(num_channels))
        # Transition from the 256-channel stage-1 output to the stage-2 width.
        self.transition1 = self._make_transition_layer([256], num_channels)
        self.stage2 = self._make_stage(self.stage2_cfg, num_channels)

        # Stage 3.
        self.stage3_cfg = extra['STAGE3']
        num_channels = self.stage3_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage3_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))
        ]
        logger.info('stage3 num_channels: %d', len(num_channels))
        self.stage3 = self._make_stage(self.stage3_cfg, num_channels)

        # Stage 4.
        self.stage4_cfg = extra['STAGE4']
        num_channels = self.stage4_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage4_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))
        ]
        logger.info('stage4 num_channels: %d', len(num_channels))
        self.stage4 = self._make_stage(self.stage4_cfg, num_channels)

        # Heatmap heads: one conv head per output resolution plus deconv
        # upsampling stages.
        self.final_layers = self._make_final_layers(cfg)
        self.deconv_layers = self._make_deconv_layers(cfg)
        self.num_deconvs = extra.DECONV.NUM_DECONVS
        self.deconv_config = cfg.MODEL.EXTRA.DECONV

        # TODO: optional coordinate-regression transformer-decoder head
        # (commented-out prototype removed).

        # Parameter initialisation. The previous code used
        # ``self.apply(self._init_weights)``, which passed each submodule as
        # the ``pretrained`` argument and re-ran the full ``self.modules()``
        # loop once per submodule (quadratic, redundant). A single direct
        # call performs the same initialisation once.
        self._init_weights()

    def _make_final_layers(self, cfg):
        """Build one heatmap-prediction conv per output resolution.

        Index 0 consumes the backbone output (DIM_MODEL channels); indices
        1..NUM_DECONVS consume the corresponding deconv stage outputs.

        :return: nn.ModuleList of NUM_DECONVS + 1 conv layers.
        """
        input_channel = cfg.MODEL.DIM_MODEL
        extra = cfg.MODEL.EXTRA
        final_layers = []
        output_channel = cfg['MODEL']['NUM_JOINTS']
        final_layers.append(nn.Conv2d(
            in_channels=input_channel,
            out_channels=output_channel,
            kernel_size=extra.FINAL_CONV_KERNEL,
            stride=1,
            # 'same' padding for the 3x3 case, no padding otherwise.
            padding=1 if extra.FINAL_CONV_KERNEL == 3 else 0
        ))

        deconv_cfg = extra.DECONV
        for i in range(deconv_cfg.NUM_DECONVS):
            input_channels = deconv_cfg.NUM_CHANNELS[i]
            output_channels = cfg.MODEL.NUM_JOINTS
            final_layers.append(nn.Conv2d(
                in_channels=input_channels,
                out_channels=output_channels,
                kernel_size=extra.FINAL_CONV_KERNEL,
                stride=1,
                padding=1 if extra.FINAL_CONV_KERNEL == 3 else 0
            ))

        return nn.ModuleList(final_layers)

    def _make_deconv_layers(self, cfg):
        """Build the stride-2 transposed-conv upsampling stages.

        Each stage is ConvTranspose2d + BN + ReLU followed by
        NUM_BASIC_BLOCKS E_ECABasicBlock refinements. When CAT_OUTPUT[i] is
        set, the previous heatmap prediction is concatenated to the stage
        input, so the input channel count grows by NUM_JOINTS.

        :return: nn.ModuleList of NUM_DECONVS sequential stages.
        """
        input_channels = cfg.MODEL.DIM_MODEL
        extra = cfg.MODEL.EXTRA
        deconv_cfg = extra.DECONV

        deconv_layers = []
        for i in range(deconv_cfg.NUM_DECONVS):
            if deconv_cfg.CAT_OUTPUT[i]:
                # Previous heatmaps are concatenated back in (see forward()).
                final_output_channels = cfg.MODEL.NUM_JOINTS
                input_channels += final_output_channels
            output_channels = deconv_cfg.NUM_CHANNELS[i]
            deconv_kernel, padding, output_padding = \
                self._get_deconv_cfg(deconv_cfg.KERNEL_SIZE[i])

            layers = []
            layers.append(nn.Sequential(
                nn.ConvTranspose2d(
                    in_channels=input_channels,
                    out_channels=output_channels,
                    kernel_size=deconv_kernel,
                    stride=2,
                    padding=padding,
                    output_padding=output_padding,
                    bias=False),
                nn.BatchNorm2d(output_channels, momentum=BN_MOMENTUM),
                nn.ReLU(inplace=True)
            ))
            for _ in range(cfg.MODEL.EXTRA.DECONV.NUM_BASIC_BLOCKS):
                layers.append(nn.Sequential(
                    E_ECABasicBlock(output_channels, output_channels),
                ))
            deconv_layers.append(nn.Sequential(*layers))
            input_channels = output_channels

        return nn.ModuleList(deconv_layers)

    def _get_deconv_cfg(self, deconv_kernel):
        """Map a deconv kernel size to (kernel, padding, output_padding).

        The (padding, output_padding) pairs make a stride-2 ConvTranspose2d
        exactly double the spatial resolution for each supported kernel.

        :raises ValueError: for unsupported kernel sizes (the old code fell
            through and raised an opaque UnboundLocalError instead).
        """
        if deconv_kernel == 4:
            padding = 1
            output_padding = 0
        elif deconv_kernel == 3:
            padding = 1
            output_padding = 1
        elif deconv_kernel == 2:
            padding = 0
            output_padding = 0
        else:
            raise ValueError(
                'unsupported deconv kernel size: {}'.format(deconv_kernel))

        return deconv_kernel, padding, output_padding

    def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer):
        """Build the stage-1 -> stage-2 transition.

        Only the first (highest-resolution) entry of each channel list is
        used: a 3x3 conv with stride 1 and padding 1 changes the channel
        count while keeping the spatial resolution.

        :param num_channels_pre_layer: channel list of the previous stage.
        :param num_channels_cur_layer: channel list of the current stage.
        :return: nn.Sequential(conv3x3, BN, ReLU) performing the channel change.
        """
        return nn.Sequential(
            nn.Conv2d(
                num_channels_pre_layer[0],
                num_channels_cur_layer[0],
                3, 1, 1, bias=False
            ),
            nn.BatchNorm2d(num_channels_cur_layer[0]),
            nn.ReLU(inplace=True)
        )

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks, ResNet-style.

        A 1x1 conv downsample branch is added when the stride or the channel
        count changes, so the residual addition stays shape-compatible.
        Updates ``self.inplanes`` as a side effect.
        """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes, planes * block.expansion,
                    kernel_size=1, stride=stride, bias=False
                ),
                nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def _make_stage(self, layer_config, num_inchannels,
                    multi_scale_output=True):
        """Build one CNT stage from its config section.

        :param layer_config: config dict for this stage (NUM_BRANCHES,
            BLOCK, IMAGE_SIZE, EMBED_DIM, ... — IMAGE_SIZE is the highest
            resolution, i.e. 1/4 of the input).
        :param num_inchannels: per-branch channel counts (length == number
            of branches).
        :param multi_scale_output: kept for interface compatibility;
            currently unused.
        :return: the constructed CNT module.
        """
        num_modules = layer_config['NUM_MODULES']
        num_branches = layer_config['NUM_BRANCHES']
        num_blocks = layer_config['NUM_BLOCKS']
        num_channels = layer_config['NUM_CHANNELS']
        block = blocks_dict[layer_config['BLOCK']]
        fuse_method = layer_config['FUSE_METHOD']
        img_size = layer_config['IMAGE_SIZE']
        layers = layer_config['LAYERS']
        k_size = layer_config['K_SIZE']
        num_head = layer_config['NUM_HEAD']
        mlp_ratios = layer_config['MLP_RATIOS']
        sr_ratios = layer_config['SR_RATIOS']
        embed_dim = layer_config['EMBED_DIM']
        cnt = CNT(block, img_size, num_inchannels, layers, k_size, fuse_method, embed_dim, num_head, mlp_ratios,
                  qkv_bias=False, norm_layer=partial(nn.LayerNorm, eps=1e-6), sr_ratios=sr_ratios)
        return cnt

    def forward(self, x):
        """
        :param x: input image batch, B x 3 x 256 x 192.
        :return: list of heatmap tensors, one per output resolution
            (base resolution first, then one per deconv stage).
        """
        x = self.conv1(x)  # B x 64 x 128 x 96 (1/2 resolution)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)  # B x 64 x 64 x 48 (1/4 resolution)
        x = self.bn2(x)
        x = self.relu(x)

        # Stage 1: four E_ECABottleneck blocks.
        x = self.layer1(x)
        # Channel transition into stage 2: B x 256 x H x W -> B x C x H x W.
        x = self.transition1(x)

        # Sequence view for the transformer branch:
        # B x C x H x W -> (H*W) x B x C.
        x_s = x.flatten(2).permute(2, 0, 1)

        # Each stage consumes and returns both the sequence view and the
        # feature-map view.
        x_s, x = self.stage2(x_s, x)
        x_s, x = self.stage3(x_s, x)
        x_s, x = self.stage4(x_s, x)

        # Fold the sequence back into a feature map of the same shape as x.
        bs, c, h, w = x.shape
        x_out1 = x_s
        x_out1 = x_out1.permute(1, 2, 0).contiguous().view(bs, c, h, w)
        x_out1 = self.relu(x_out1)

        # Base-resolution heatmap head, then iterative deconv refinement;
        # when configured, the previous heatmaps are concatenated back into
        # the feature map before the next deconv stage.
        final_outputs = []
        y_out1 = self.final_layers[0](x_out1)
        final_outputs.append(y_out1)
        for i in range(self.num_deconvs):
            if self.deconv_config.CAT_OUTPUT[i]:
                x_out1 = torch.cat((x_out1, y_out1), 1)

            x_out1 = self.deconv_layers[i](x_out1)
            y_out1 = self.final_layers[i + 1](x_out1)
            final_outputs.append(y_out1)
        return final_outputs

    def _init_weights(self, pretrained='', print_load_info=False):
        """Initialise all parameters from standard distributions.

        Convs and deconvs get N(0, 0.001) weights and zero bias; BatchNorm
        and LayerNorm get unit weight / zero bias; Linear layers get
        truncated-normal weights (std 0.02) and zero bias.

        :param pretrained: path to a pretrained checkpoint.
            NOTE(review): currently ignored — no weights are loaded even
            though ``get_pose_net`` passes cfg.MODEL.PRETRAINED here;
            confirm whether checkpoint loading still needs implementing.
        :param print_load_info: unused, kept for interface compatibility.
        """
        logger.info('=> init weights from normal distribution')
        for m in self.modules():
            # Conv2d and ConvTranspose2d received identical treatment in the
            # original per-branch code; merged into one branch.
            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
                nn.init.normal_(m.weight, std=0.001)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.LayerNorm):
                nn.init.constant_(m.bias, 0)
                nn.init.constant_(m.weight, 1.0)
            elif isinstance(m, nn.Linear):
                trunc_normal_(m.weight, std=.02)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

def _get_activation_fn(activation):
    """Return an activation function given a string"""
    if activation == "relu":
        return F.relu
    if activation == "gelu":
        return F.gelu
    if activation == "glu":
        return F.glu
    raise RuntimeError(F"activation should be relu/gelu, not {activation}.")


def _get_clones(module, N):
    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])


def build_transformer_decoder(args):
    # NOTE(review): this returns the class object itself (not an instance)
    # and ignores *args*. `TransformerDecoderLayer` is not defined in this
    # module — its definition/import appears to be commented out — so calling
    # this will raise NameError. Confirm whether this helper is still needed.
    return TransformerDecoderLayer


def get_pose_net(cfg, is_train, **kwargs):
    """Construct an HRCNT pose-estimation model from the configuration.

    :param cfg: experiment configuration object.
    :param is_train: True when building the model for training.
    :param kwargs: extra keyword arguments forwarded to HRCNT.
    :return: the constructed model.
    """
    model = HRCNT(cfg, **kwargs)

    # Weight (re-)initialisation only applies in training mode and only
    # when the config asks for it.
    if not is_train:
        return model
    if cfg['MODEL']['INIT_WEIGHTS']:
        model._init_weights(cfg['MODEL']['PRETRAINED'])
    return model
