import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable
from models.attention import Attention, MyAttentionLayer
from models.resnet import BasicBlock, ResNetEncoding

class base_cnn_Model(nn.Module):
    """Three-stage 1D CNN classifier.

    Each stage is Conv1d -> BatchNorm -> ReLU -> MaxPool; the final feature
    map is flattened and projected to class logits by a linear layer.
    Returns both the logits and the last convolutional feature map.
    """

    def __init__(self, configs):
        super(base_cnn_Model, self).__init__()

        k = configs.kernel_size
        width = configs.kernel_nums

        # Stage 1: strided conv with "same"-style padding, plus dropout.
        self.conv_block1 = nn.Sequential(
            nn.Conv1d(configs.input_channels, width, kernel_size=k,
                      stride=configs.stride, bias=False, padding=k // 2),
            nn.BatchNorm1d(width),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=2, stride=2, padding=1),
            nn.Dropout(configs.dropout),
        )

        # Stage 2: doubles the channel count.
        self.conv_block2 = nn.Sequential(
            nn.Conv1d(width, width * 2, kernel_size=k, stride=1,
                      bias=False, padding=4),
            nn.BatchNorm1d(width * 2),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=2, stride=2, padding=1),
        )

        # Stage 3: projects to the final embedding width.
        self.conv_block3 = nn.Sequential(
            nn.Conv1d(width * 2, configs.final_out_channels, kernel_size=k,
                      stride=1, bias=False, padding=4),
            nn.BatchNorm1d(configs.final_out_channels),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=2, stride=2, padding=1),
        )

        # Classifier over the flattened stage-3 feature map; features_len *
        # model_output_dim must equal that flattened size.
        self.logits = nn.Linear(configs.features_len * configs.model_output_dim,
                                configs.num_classes)

    def forward(self, x_in):
        """Run the three conv stages and classify.

        Args:
            x_in: tensor of shape (batch, input_channels, length).

        Returns:
            (logits, features): class scores and the stage-3 feature map.
        """
        features = self.conv_block3(self.conv_block2(self.conv_block1(x_in)))
        flat = features.reshape(features.shape[0], -1)
        return self.logits(flat), features




class base_res_cnn_Model(nn.Module):
    """CNN variant whose final feature map is squeezed by an adaptive max-pool.

    Three Conv1d/BatchNorm/ReLU/MaxPool stages widen the channels
    (kernel_nums -> 2x -> 4x), then AdaptiveMaxPool1d fixes the temporal
    size to ``configs.final_out_channels`` before the linear classifier.
    Returns (logits, pooled feature map).
    """

    def __init__(self, configs):
        super(base_res_cnn_Model, self).__init__()

        k = configs.kernel_size
        width = configs.kernel_nums

        self.conv_block1 = nn.Sequential(
            nn.Conv1d(configs.input_channels, width, kernel_size=k,
                      stride=configs.stride, bias=False, padding=k // 2),
            nn.BatchNorm1d(width),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=2, stride=2, padding=1),
            nn.Dropout(configs.dropout),
        )

        self.conv_block2 = nn.Sequential(
            nn.Conv1d(width, width * 2, kernel_size=k, stride=1,
                      bias=False, padding=4),
            nn.BatchNorm1d(width * 2),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=2, stride=2, padding=1),
        )

        self.conv_block3 = nn.Sequential(
            nn.Conv1d(width * 2, width * 4, kernel_size=k, stride=1,
                      bias=False, padding=4),
            nn.BatchNorm1d(width * 4),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=2, stride=2, padding=1),
        )

        # NOTE(review): these three pools are registered but never used by
        # forward(); kept so checkpoints retain the same module layout.
        self.max_pool1 = nn.Sequential(nn.MaxPool1d(kernel_size=2, stride=2, padding=1))
        self.max_pool2 = nn.Sequential(nn.MaxPool1d(kernel_size=2, stride=2, padding=1))
        self.max_pool3 = nn.Sequential(nn.MaxPool1d(kernel_size=2, stride=2, padding=1))

        # Forces the last dimension to final_out_channels regardless of the
        # input sequence length.
        self.global_pool = nn.Sequential(nn.AdaptiveMaxPool1d(configs.final_out_channels))

        self.logits = nn.Linear(configs.features_len * configs.model_output_dim,
                                configs.num_classes)

    def forward(self, x_in):
        """Return (logits, pooled features) for a (batch, channels, length) input."""
        h = self.conv_block1(x_in)
        h = self.conv_block2(h)
        h = self.conv_block3(h)
        pooled = self.global_pool(h)
        flat = pooled.reshape(pooled.shape[0], -1)
        return self.logits(flat), pooled

class Bigru_self_attention_model(nn.Module):
    """Stack of three bidirectional GRU layers, each followed by self-attention.

    Each GRU feeds the next GRU with its full sequence output; the attention
    summary of every stage is kept, concatenated on the last dimension,
    flattened, and classified with a single linear layer.
    Returns (logits, concatenated attention features).
    """

    def __init__(self, configs):
        super(Bigru_self_attention_model, self).__init__()

        # NOTE(review): dropout on a single-layer GRU has no effect (PyTorch
        # warns); kept to preserve the original configuration.
        self.bigru_block1 = nn.GRU(configs.input_channels, configs.hidden_dim, 1,
                                   batch_first=True, bidirectional=True,
                                   dropout=configs.dropout)
        self.attention_layer1 = Attention(configs.features_len, heads=1,
                                          dropout=configs.dropout)

        self.bigru_block2 = nn.GRU(configs.hidden_dim * 2, configs.hidden_dim, 1,
                                   batch_first=True, bidirectional=True,
                                   dropout=configs.dropout)
        self.attention_layer2 = Attention(configs.features_len, heads=1,
                                          dropout=configs.dropout)

        self.bigru_block3 = nn.GRU(configs.hidden_dim * 2, configs.hidden_dim, 1,
                                   batch_first=True, bidirectional=True,
                                   dropout=configs.dropout)
        self.attention_layer3 = Attention(configs.features_len, heads=1,
                                          dropout=configs.dropout)

        self.logits = nn.Linear(
            configs.model_output_features_len * configs.model_output_dim * 3,
            configs.num_classes,
        )

    def forward(self, x_in):
        """Return (logits, fused attention features) for a batch-first sequence."""
        seq1, _ = self.bigru_block1(x_in)
        att1 = self.attention_layer1(seq1)

        seq2, _ = self.bigru_block2(seq1)
        att2 = self.attention_layer2(seq2)

        seq3, _ = self.bigru_block3(seq2)
        att3 = self.attention_layer3(seq3)

        # Fuse the per-stage attention summaries along the feature axis.
        fused = torch.cat((att1, att2, att3), -1)
        flat = fused.reshape(fused.shape[0], -1)
        return self.logits(flat), fused


class Bigru_self_attention_5(nn.Module):
    """Five-layer bidirectional GRU stack with per-layer self-attention.

    Stage i feeds its sequence output into stage i+1; the five attention
    summaries are concatenated on the last dimension, flattened, and
    classified with one linear layer.
    Returns (logits, concatenated attention features).
    """

    def __init__(self, configs):
        super(Bigru_self_attention_5, self).__init__()

        # NOTE(review): dropout on a single-layer GRU has no effect (PyTorch
        # warns); kept to preserve the original configuration.
        # First stage consumes the raw channels; later stages consume the
        # bidirectional output (2 * hidden_dim) of the previous stage.
        in_sizes = [configs.input_channels] + [configs.hidden_dim * 2] * 4
        for idx, in_size in enumerate(in_sizes, start=1):
            gru = nn.GRU(in_size, configs.hidden_dim, 1, batch_first=True,
                         bidirectional=True, dropout=configs.dropout)
            att = Attention(configs.features_len, heads=1, dropout=configs.dropout)
            setattr(self, "bigru_block%d" % idx, gru)
            setattr(self, "attention_layer%d" % idx, att)

        self.logits = nn.Linear(
            configs.model_output_features_len * configs.model_output_dim * 5,
            configs.num_classes,
        )

    def forward(self, x_in):
        """Return (logits, fused attention features) for a batch-first sequence."""
        seq = x_in
        summaries = []
        for idx in range(1, 6):
            seq, _ = getattr(self, "bigru_block%d" % idx)(seq)
            summaries.append(getattr(self, "attention_layer%d" % idx)(seq))

        fused = torch.cat(tuple(summaries), -1)
        flat = fused.reshape(fused.shape[0], -1)
        return self.logits(flat), fused


# biblstm_self_attention_model
class biblstm_self_attention_model(nn.Module):
    """Three stacked bidirectional LSTM layers, each followed by self-attention.

    Every LSTM uses a hard-coded hidden size of 9 per direction, so each
    bidirectional output carries 18 features. The three attention summaries
    are concatenated on dim 1, flattened, and classified by one linear layer.
    Returns (logits, concatenated attention features).
    """

    def __init__(self, configs):
        super(biblstm_self_attention_model, self).__init__()

        hidden = 9  # hard-coded hidden units per direction (original design)

        # NOTE(review): dropout on a single-layer LSTM has no effect (PyTorch
        # warns); kept to preserve the original configuration.
        self.bilstm_block1 = nn.LSTM(configs.input_channels, hidden, 1,
                                     batch_first=True, bidirectional=True,
                                     dropout=configs.dropout)
        self.attention_layer1 = Attention(configs.hidden_dim, heads=1,
                                          dropout=configs.dropout)

        # FIX: blocks 2 and 3 consume the bidirectional output of the previous
        # LSTM, which has 2 * 9 = 18 features. The original declared
        # `configs.input_channels * 2`, which only matched when input_channels
        # happened to be 9 and crashed otherwise.
        self.bilstm_block2 = nn.LSTM(hidden * 2, hidden, 1,
                                     batch_first=True, bidirectional=True,
                                     dropout=configs.dropout)
        self.attention_layer2 = Attention(configs.hidden_dim, heads=1,
                                          dropout=configs.dropout)

        self.bilstm_block3 = nn.LSTM(hidden * 2, hidden, 1,
                                     batch_first=True, bidirectional=True,
                                     dropout=configs.dropout)
        self.attention_layer3 = Attention(configs.hidden_dim, heads=1,
                                          dropout=configs.dropout)

        self.logits = nn.Linear(configs.features_len * configs.final_out_channels * 3,
                                configs.num_classes)

    def forward(self, x_in):
        """Return (logits, fused attention features) for a batch-first sequence."""
        seq1, _ = self.bilstm_block1(x_in)
        att1 = self.attention_layer1(seq1)

        seq2, _ = self.bilstm_block2(seq1)
        att2 = self.attention_layer2(seq2)

        seq3, _ = self.bilstm_block3(seq2)
        att3 = self.attention_layer3(seq3)

        # Concatenated along dim 1 (unlike the GRU models, which use dim -1).
        fused = torch.cat((att1, att2, att3), 1)
        flat = fused.reshape(fused.shape[0], -1)
        return self.logits(flat), fused





class Bigru_self_attention_backup_model(nn.Module):
    """Backup five-stage bidirectional GRU + self-attention classifier.

    Fixes over the original implementation:
      * forward() now routes each stage through its own GRU block and
        attention layer; the original reused ``bigru_block2`` for stages 3-5
        and ``attention_layer`` for every stage, leaving the other registered
        modules as untrained dead weight.
      * GRU blocks 2-5 consume the bidirectional output of the previous block
        (2 * 9 = 18 features) instead of ``input_channels * 2``, which only
        matched when input_channels happened to be 9.
      * Leftover debug prints removed.

    Returns (logits, concatenated attention features).
    """

    def __init__(self, configs):
        super(Bigru_self_attention_backup_model, self).__init__()

        hidden = 9  # hard-coded hidden units per direction (original design)

        # NOTE(review): dropout on a single-layer GRU has no effect (PyTorch warns).
        self.bigru_block1 = nn.GRU(configs.input_channels, hidden, 1,
                                   batch_first=True, bidirectional=True, dropout=0.1)
        self.attention_layer = Attention(configs.features_len, heads=1, dropout=0.1)

        self.bigru_block2 = nn.GRU(hidden * 2, hidden, 1,
                                   batch_first=True, bidirectional=True, dropout=0.1)
        self.attention_layer2 = Attention(configs.features_len, heads=1, dropout=0.1)

        self.bigru_block3 = nn.GRU(hidden * 2, hidden, 1,
                                   batch_first=True, bidirectional=True, dropout=0.1)
        self.attention_layer3 = Attention(configs.features_len, heads=1, dropout=0.1)

        self.bigru_block4 = nn.GRU(hidden * 2, hidden, 1,
                                   batch_first=True, bidirectional=True, dropout=0.1)
        self.attention_layer4 = Attention(configs.features_len, heads=1, dropout=0.1)

        self.bigru_block5 = nn.GRU(hidden * 2, hidden, 1,
                                   batch_first=True, bidirectional=True, dropout=0.1)
        self.attention_layer5 = Attention(configs.features_len, heads=1, dropout=0.1)

        self.logits = nn.Linear(configs.features_len * configs.final_out_channels * 5,
                                configs.num_classes)

    def forward(self, x_in):
        """Return (logits, fused attention features) for a batch-first sequence."""
        seq1, _ = self.bigru_block1(x_in)
        att1 = self.attention_layer(seq1)

        seq2, _ = self.bigru_block2(seq1)
        att2 = self.attention_layer2(seq2)

        seq3, _ = self.bigru_block3(seq2)
        att3 = self.attention_layer3(seq3)

        seq4, _ = self.bigru_block4(seq3)
        att4 = self.attention_layer4(seq4)

        seq5, _ = self.bigru_block5(seq4)
        att5 = self.attention_layer5(seq5)

        fused = torch.cat((att1, att2, att3, att4, att5), 1)
        flat = fused.reshape(fused.shape[0], -1)
        return self.logits(flat), fused


# ------------EfficientResNet18CapsNet------------------------------------------------------

def squash(input, eps=10e-21):
    """Non-linearly scale capsule vectors along the last dimension.

    Implements the Efficient-CapsNet squash: (1 - 1/exp(||x||)) * x / ||x||.
    Short vectors shrink toward zero while long vectors approach unit norm;
    ``eps`` guards the divisions against zero-norm inputs.
    """
    norm = torch.norm(input, dim=-1, keepdim=True)
    unit = input / (norm + eps)
    scale = 1 - 1 / (torch.exp(norm) + eps)
    return scale * unit


def length(input):
    """Euclidean norm of each capsule along the last dimension.

    The small epsilon keeps the sqrt differentiable at zero.
    """
    squared = (input ** 2).sum(dim=-1)
    return torch.sqrt(squared + 1e-8)


def mask(input):
    """Keep one capsule per sample, zero the rest, and flatten.

    If ``input`` is a list it is ``(capsules, mask)`` with the mask supplied
    explicitly (e.g. true labels during training). Otherwise the capsule with
    the largest norm is selected via a one-hot argmax mask (inference).
    Returns a (batch, num_capsules * dim) tensor.
    """
    if type(input) is list:
        input, keep = input
    else:
        norms = torch.sqrt(torch.sum(input ** 2, dim=-1))
        keep = F.one_hot(torch.argmax(norms, dim=1),
                         num_classes=norms.shape[1]).float()
    return (input * keep.unsqueeze(-1)).view(input.shape[0], -1)


class PrimaryCapsLayer(nn.Module):
    """Primary capsule layer: a 2D depthwise convolution whose output is
    reshaped into capsules and squashed.

    Args:
        in_channels (int): number of input feature maps (also the number of
            depthwise groups).
        kernel_size (int): depthwise convolution kernel size.
        num_capsules (int): number of primary capsules produced.
        dim_capsules (int): dimensionality of each capsule.
        stride (int, optional): depthwise convolution stride. Defaults to 1.
    """

    def __init__(self, in_channels, kernel_size, num_capsules, dim_capsules, stride=1):
        super(PrimaryCapsLayer, self).__init__()
        # groups == in_channels makes the convolution depthwise.
        self.depthwise_conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=in_channels,
            kernel_size=kernel_size,
            stride=stride,
            groups=in_channels,
            padding="valid",
        )
        self.num_capsules = num_capsules
        self.dim_capsules = dim_capsules

    def forward(self, input):
        # Regroup conv features as (batch, num_capsules, dim_capsules); the
        # spatial output must contain exactly num_capsules * dim_capsules
        # values per sample for the view to succeed.
        features = self.depthwise_conv(input)
        capsules = features.view(features.size(0), self.num_capsules, self.dim_capsules)
        return squash(capsules)


class RoutingLayer(nn.Module):
    """Self-attention routing layer producing a parent layer of capsules.

    Args:
        user_divce (str): kept for backward compatibility ("cpu"/"cuda"); the
            computation no longer depends on it.
        num_capsules (int): number of parent capsules.
        dim_capsules (int): dimension of each parent capsule.
    """

    def __init__(self, user_divce, num_capsules, dim_capsules):
        super(RoutingLayer, self).__init__()
        # The middle two sizes must match the input's (input_shape[-2],
        # input_shape[-1]); adjust W and b together with num_capsules and the
        # backbone configuration (see get_backbone).
        self.W = nn.Parameter(torch.Tensor(num_capsules, 6, 6, dim_capsules))
        self.b = nn.Parameter(torch.zeros(num_capsules, 6, 1))
        self.num_capsules = num_capsules
        self.dim_capsules = dim_capsules
        self.user_divce = user_divce
        self.reset_parameters()

    def reset_parameters(self):
        """Kaiming-init the transform W; zero the routing bias b."""
        nn.init.kaiming_normal_(self.W)
        nn.init.zeros_(self.b)

    def forward(self, input):
        # u: per-capsule votes, shape (..., num_capsules, n_in, dim_capsules)
        u = torch.einsum("...ji,kjiz->...kjz", input, self.W)
        # Vote agreement (scaled dot-product self-attention),
        # shape (..., num_capsules, n_in, 1)
        c = torch.einsum("...ij,...kj->...i", u, u)[..., None]
        # FIX: scale by sqrt(dim_capsules) as a plain Python float. The
        # original built a constant tensor via cpu/cuda string dispatch, which
        # raised NameError for any other device string (e.g. "mps") and
        # re-allocated the constant on every forward pass. Also use the
        # canonical `dim=` kwarg for softmax instead of the numpy-style
        # `axis=` alias.
        c = c / (self.dim_capsules ** 0.5)
        c = torch.softmax(c, dim=1)
        c = c + self.b
        # Weighted sum of votes -> (..., num_capsules, dim_capsules)
        s = torch.sum(torch.mul(u, c), dim=-2)
        return squash(s)


# https://github.com/KushajveerSingh/resize_network_cv
# Intended to run after the "Learning to Resize Images for Computer Vision Tasks" resizing step.
class ResBlock(nn.Module):
    """Residual block: two 3x3 conv+BN layers with a LeakyReLU between them,
    added back onto the input through an identity skip connection."""

    def __init__(self, channel_size: int, negative_slope: float = 0.2):
        super().__init__()
        layers = []
        for is_last in (False, True):
            layers.append(nn.Conv2d(channel_size, channel_size,
                                    kernel_size=3, padding=1, bias=False))
            layers.append(nn.BatchNorm2d(channel_size))
            if not is_last:
                # Activation only between the two conv+BN pairs.
                layers.append(nn.LeakyReLU(negative_slope, inplace=True))
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        # Identity skip connection.
        return x + self.block(x)


class EfficientResNet18CapsNet(nn.Module):
    """Efficient-CapsNet with a small ResNet encoder as the feature backbone.

    Pipeline: ResNet encoding -> primary capsules (depthwise conv + squash)
    -> self-attention routing -> per-class capsule lengths as probabilities.
    Returns (probabilities, primary capsules).
    """

    def __init__(self, configs, user_device="cuda"):
        super(EfficientResNet18CapsNet, self).__init__()

        self.resnet_backbone = self.get_backbone(configs)

        # Capsule sizes must stay consistent with get_backbone's final width
        # (currently 36 channels) and with RoutingLayer's W/b shapes.
        self.primary_caps = PrimaryCapsLayer(
            in_channels=36, kernel_size=1, num_capsules=6, dim_capsules=6
        )
        self.device = user_device
        self.digit_caps = RoutingLayer(self.device,
                                       num_capsules=configs.num_classes,
                                       dim_capsules=8)
        self.probs = None  # cached probabilities from the most recent forward pass
        self.configs = configs
        self.reset_parameters()

    def get_backbone(self, configs):
        """Build the small ResNet encoder used as the feature extractor.

        blocks_config entries are presumably [width, block-count] pairs and
        first_config presumably (in_ch, out_ch, kernel, stride) for the stem
        -- confirm against models.resnet.ResNetEncoding.
        """
        blocks_config = [[3, 2], [9, 2], [18, 2], [36, 2]]
        first_config = [1, 36, 3, 2]
        return ResNetEncoding(BasicBlock, blocks_config, first_config,
                              first_pool=True)

    def reset_parameters(self):
        """Re-initialize the backbone stem convolution with Kaiming-normal weights."""
        nn.init.kaiming_normal_(self.resnet_backbone.conv1.weight)

    def forward(self, x):
        """Return (class probabilities, primary capsules).

        A 3D input (batch, H, W) is promoted to a single-channel image batch;
        otherwise x is expected as (batch, channels, H, W).
        """
        if len(x.shape) == 3:
            x = x.unsqueeze(1)

        encoded = self.resnet_backbone(x)
        primary = self.primary_caps(encoded)
        routed = self.digit_caps(primary)
        probs = length(routed)
        self.probs = probs
        return probs, primary