import torch
import torch.nn as nn
from torchvision import models



class Vgg19(nn.Module):
    """Pretrained VGG-19 feature extractor (first 30 `features` layers).

    Copies layers 0..29 of torchvision's pretrained VGG-19 ``features``
    stack and returns their output (named ``h_relu5_1`` in the original).
    By default the copied weights are frozen.

    NOTE(review): ``rgb_range`` is accepted but never used — presumably
    kept for interface compatibility with callers; confirm before removing.
    """

    def __init__(self, require_grad=False, rgb_range=1):
        super(Vgg19, self).__init__()
        pretrained_features = models.vgg19(pretrained=True).features

        # Keep the positional layer names ("0", "1", ...) so state-dict
        # keys stay compatible with the original layout.
        self.slice1 = torch.nn.Sequential()
        for idx in range(30):
            self.slice1.add_module(str(idx), pretrained_features[idx])

        # Freeze the copied weights unless training was requested.
        if not require_grad:
            for p in self.slice1.parameters():
                p.requires_grad = False

    def forward(self, x):
        return self.slice1(x)


class LTE(nn.Module):
    """ Learnable Texture Extractor """

    def __init__(self, requires_grad=True, use_pretrained=True):
        super(LTE, self).__init__()
        # Build a VGG-19 whose final classifier layer is swapped for a
        # 7-class head so the fine-tuned flora checkpoint's shapes match.
        vgg19 = models.vgg19(pretrained=True)
        vgg19.classifier[-1] = nn.Linear(
            in_features=4096, out_features=7)

        if use_pretrained:
            # NOTE(review): relative path — assumes the working directory
            # sits next to "../saved"; confirm against the launcher.
            vgg19.load_state_dict(
                torch.load("../saved/vgg19-flora-classi.pth"))
        features = vgg19.features

        # Split the convolutional stack into three slices, keeping the
        # original layer indices as module names for checkpoint parity.
        self.slice1 = torch.nn.Sequential()
        self.slice2 = torch.nn.Sequential()
        self.slice3 = torch.nn.Sequential()
        layout = (
            (self.slice1, 0, 2),
            (self.slice2, 2, 7),
            (self.slice3, 7, 12),
        )
        for seq, lo, hi in layout:
            for idx in range(lo, hi):
                seq.add_module(str(idx), features[idx])

        # Optionally freeze every copied parameter.
        if not requires_grad:
            for seq in (self.slice1, self.slice2, self.slice3):
                for p in seq.parameters():
                    p.requires_grad = False

    def forward(self, x):
        return self.slice3(self.slice2(self.slice1(x)))
    
    
class WideResNet50(nn.Module):
    """Feature extractor built from a fine-tuned Wide ResNet-50-2.

    Keeps only the stem (conv1/bn1/relu/maxpool) plus layer1 of the
    backbone as ``self.lte``.

    NOTE(review): ``class_name`` is accepted but never used — presumably
    kept for interface compatibility; confirm before removing.
    """

    def __init__(self, class_name, use_pretrained=True):
        super(WideResNet50, self).__init__()
        backbone = models.wide_resnet50_2(pretrained=True)

        # Freeze the first 96 parameter tensors of the backbone.
        for idx, param in enumerate(backbone.parameters()):
            if idx < 96:
                param.requires_grad = False

        # Replace the classification head with a small 7-way MLP so the
        # fine-tuned checkpoint's shapes match.
        backbone.fc = nn.Sequential(
            nn.Linear(backbone.fc.in_features, 256),
            nn.SELU(),
            nn.Linear(256, 7))

        if use_pretrained:
            # NOTE(review): relative path — assumes the working directory
            # sits next to "../saved"; confirm against the launcher.
            backbone.load_state_dict(torch.load(
                "../saved/resnet50-flora-classi.pth"))

        # Assemble the extractor, preserving the backbone's submodule
        # names so state-dict keys stay compatible.
        self.lte = nn.Sequential()
        for name in ("conv1", "bn1", "relu", "maxpool"):
            self.lte.add_module(name, getattr(backbone, name))
        self.lte.add_module("layer1", nn.Sequential())
        for i in range(3):
            self.lte.layer1.add_module(str(i), backbone.layer1[i])

    def forward(self, x):
        return self.lte(x)


class ViT(nn.Module):
    """Vision Transformer classifier.

    Patchifies the image with a strided Conv2d, prepends a learnable
    [CLS] token, adds learned position embeddings, runs the sequence
    through a transformer ``Encoder``, and classifies from the [CLS]
    output.

    NOTE(review): ``MIN_NUM_PATCHES``, ``Encoder``, ``rearrange`` and
    ``repeat`` must be defined/imported elsewhere in this module.
    """

    def __init__(self, *, image_size, patch_size, num_classes,
                 depth, heads, mlp_dim, channels=3,
                 dropout=0., emb_dropout=0.):
        super().__init__()

        assert image_size % patch_size == 0, \
            'image dimensions must be divisible by the patch size'

        num_patches = (image_size // patch_size) ** 2
        hidden_size = channels * patch_size ** 2

        # BUG FIX: message is now an f-string so {num_patches} actually
        # interpolates instead of being printed literally.
        assert num_patches > MIN_NUM_PATCHES, (
            f'your number of patches ({num_patches}) is way too small '
            f'for attention to be effective. try decreasing your patch size')

        self.patch_size = patch_size
        self.hidden_size = hidden_size
        # Patchify: kernel size == stride == patch_size, so each output
        # spatial location is one non-overlapping patch embedding.
        self.embedding = nn.Conv2d(channels, hidden_size, patch_size, patch_size)
        # +1 position for the [CLS] token.
        self.position_embedding = nn.Parameter(
            torch.randn(1, num_patches + 1, hidden_size))
        self.cls = nn.Parameter(torch.randn(1, 1, hidden_size))
        self.dropout = nn.Dropout(emb_dropout)
        self.transformer = Encoder(hidden_size, depth, heads,
                                   mlp_dim, dropout_rate=dropout)
        self.to_cls_token = nn.Identity()
        self.mlp_head = nn.Linear(hidden_size, num_classes)

    def forward(self, img, mask=None):
        # NOTE(review): ``mask`` is accepted but unused here.
        x = self.embedding(img)                      # (b, hidden, h', w')
        x = rearrange(x, 'b c h w -> b (h w) c')     # (b, n, hidden)
        b, n, _ = x.shape
        cls_tokens = repeat(self.cls, '() n d -> b n d', b=b)
        x = torch.cat((cls_tokens, x), dim=1)        # (b, n+1, hidden)
        # BUG FIX: was ``self.pos_embedding``, an attribute that is never
        # defined (__init__ registers ``self.position_embedding``), so the
        # first forward pass raised AttributeError. Also use out-of-place
        # add to keep autograd safe.
        x = x + self.position_embedding[:, :(n + 1)]
        x = self.dropout(x)
        x = self.transformer(x)
        x = self.to_cls_token(x[:, 0])               # take the [CLS] token
        return self.mlp_head(x)
