import torch
import torch.nn as nn
from torchvision import models
import torch.nn.functional as F
import timm
# import segmentation_models_pytorch as smp

class COPDNet(nn.Module):
    """Attention-based multiple-instance-learning (MIL) classifier for COPD.

    ``forward`` expects one *bag* of CT-slice instances shaped
    ``(1, N, C, H, W)``; the leading bag dimension is squeezed away and the
    ``N`` slices are treated as instances. Each instance is embedded by a
    timm backbone, the embeddings are pooled with a learned attention
    weighting over the instances, and the pooled vector is classified into
    2 classes.
    """

    # backbone name -> (L: instance embedding size, D: attention hidden size)
    _DIMS = {
        "densenet121": (1024, 128),
        "efficientnet_b4": (1792, 256),
        "resnet50": (2048, 256),
        "resnet26d": (2048, 256),
        "vgg16": (512, 64),
        "resnet34": (512, 64),
        "vit_large_patch16_224": (1024, 16),
        "vit_base_patch16_224": (768, 12),
    }

    # Backbones whose forward_features() yields a (B, L, 7, 7) CNN feature
    # map for 224x224 input; these get a flatten -> Linear projection head.
    _CNN_BACKBONES = {
        "densenet121", "efficientnet_b4", "vgg16",
        "resnet34", "resnet50", "resnet26d",
    }

    def __init__(self, model_name):
        """
        Args:
            model_name: timm model identifier; must be a key of ``_DIMS``.

        Raises:
            ValueError: if ``model_name`` is not a supported backbone
                (previously this left ``self.L``/``self.D`` undefined and
                failed later with a confusing AttributeError).
        """
        super().__init__()
        try:
            self.L, self.D = self._DIMS[model_name]
        except KeyError:
            raise ValueError(
                f"unsupported model_name: {model_name!r}; "
                f"expected one of {sorted(self._DIMS)}"
            ) from None
        self.K = 1  # number of attention heads / bag-level vectors

        # Backbone is created without pre-trained weights; any weights are
        # expected to be loaded externally (see the removed smp/encoder
        # loading experiments in version history).
        self.pretrained_model = timm.create_model(model_name, pretrained=False)

        if model_name in self._CNN_BACKBONES:
            # Flatten the (L, 7, 7) feature map and project back to L.
            # Using self.L here keeps the Linear size in sync with _DIMS
            # instead of repeating the channel count as a literal.
            self.feature_extractor_part2 = nn.Sequential(
                nn.Linear(self.L * 7 * 7, self.L),
                nn.ReLU(),
            )
        # NOTE(review): as in the original code, the ViT backbones get no
        # feature_extractor_part2, so forward() fails for them; supporting
        # ViT would need CLS-token pooling (features[:, 0, :]) instead of
        # flattening a spatial map — TODO confirm intended ViT path.

        # Kept for attribute compatibility; not used in forward().
        self.pooling_layer = nn.AdaptiveAvgPool2d(1)

        # Attention MIL scoring: per-instance score a_i = w^T tanh(V h_i).
        self.attention = nn.Sequential(
            nn.Linear(self.L, self.D),
            nn.Tanh(),
            nn.Linear(self.D, self.K),
        )
        # Bag-level classifier. The attribute keeps the original
        # "classifer" spelling so existing checkpoints still load.
        self.classifer = nn.Sequential(
            nn.Linear(self.L * self.K, self.L * self.K // 4),
            nn.Linear(self.L * self.K // 4, 2),
        )

    def forward(self, x):
        """Classify one bag of instances.

        Args:
            x: tensor of shape ``(1, N, C, H, W)`` — a single bag of N
                instances (the leading bag dim is squeezed away).

        Returns:
            Logits of shape ``(K, 2)``.
        """
        x = torch.squeeze(x, dim=0)  # (N, C, H, W): the bag's instances

        # Per-instance embeddings: backbone feature map -> flatten -> Linear.
        features = self.pretrained_model.forward_features(x)
        H = features.contiguous().view(features.size(0), -1)  # (N, L*7*7)
        H = self.feature_extractor_part2(H)                   # (N, L)

        # Attention pooling over the N instances of the bag.
        A = self.attention(H)          # (N, K) raw scores
        A = torch.transpose(A, 1, 0)   # (K, N)
        A = F.softmax(A, dim=1)        # normalize over instances
        M = torch.mm(A, H)             # (K, L) bag representation

        return self.classifer(M)

if __name__ == "__main__":
    model = COPDNet(model_name="resnet50")
    # print(model)
    input = torch.randn(1, 20, 3, 224, 224)
    out = model(input)
    print(out.shape)
    print(out)