import torchvision
import torch
from torch.nn import init
from torch import nn
from torchvision import transforms


class ResnetClass10(torch.nn.Module):
    """ResNet-152 backbone followed by a linear head that projects the
    1000-way ImageNet logits down to ``d_model`` outputs.

    Args:
        d_model (int): dimensionality of the final output layer.
    """

    def __init__(self, d_model):
        super(ResnetClass10, self).__init__()
        # NOTE(review): `pretrained=True` is deprecated in torchvision >= 0.13
        # in favour of `weights=ResNet152_Weights.DEFAULT` — confirm the
        # installed version before migrating.
        self.cnn_layers = torchvision.models.resnet152(pretrained=True)
        self.output_layers = torch.nn.Linear(1000, d_model)

    def forward(self, x):
        """Run the backbone, then project its 1000-dim logits to d_model."""
        x = self.cnn_layers(x)
        out = self.output_layers(x)
        return out

    # Xavier initialisation.  The misspelled name `weigth_init` is kept
    # unchanged for backward compatibility with existing callers.
    def weigth_init(self):
        """Re-initialise all submodules in place.

        Conv2d:       Xavier-uniform weights; constant 0.1 bias when present.
        BatchNorm2d:  weight = 1, bias = 0.
        Linear:       weights ~ N(0, 0.01); zero bias when present.
        """
        for m in self.modules():
            # Xavier-transform every convolution layer.
            if isinstance(m, nn.Conv2d):
                init.xavier_uniform_(m.weight.data)
                # BUGFIX: torchvision ResNet conv layers are created with
                # bias=False, so m.bias is None and the original
                # unconditional constant_ call raised AttributeError.
                if m.bias is not None:
                    init.constant_(m.bias.data, 0.1)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            # Gaussian (mean/std) initialisation for every linear layer.
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                if m.bias is not None:
                    m.bias.data.zero_()


import random

# Use the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
batch_size = 64

# Training-time augmentation pipeline.
# BUGFIX: the original passed random.randint(...) results directly to
# transforms.Pad, which sampled the padding ONCE when the pipeline was
# built, so every image received the same fixed "random" padding.
# transforms.Lambda re-samples fresh left/top/right/bottom padding for
# each image it processes.
transform1 = transforms.Compose(
    [
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(),
        transforms.Lambda(
            lambda img: transforms.functional.pad(
                img,
                [random.randint(0, 5) for _ in range(4)],
            )
        ),
        transforms.Resize(32),
        transforms.ToTensor(),
    ]
)

# Evaluation pipeline: tensor conversion only, no augmentation.
transform2 = transforms.Compose(
    [
        transforms.ToTensor(),
    ]
)
# BUGFIX: the original built ResnetClass10(50) and moved it to the device,
# then immediately overwrote `model` with a raw resnet152 — paying for a
# second full pretrained-weight download and allocation that was never used.
# Keep only the model actually probed below.
# NOTE(review): `pretrained=True` is deprecated in torchvision >= 0.13
# (use `weights=ResNet152_Weights.DEFAULT`) — confirm the installed version.
model = torchvision.models.resnet152(pretrained=True).to(device)

inputs = torch.randn(1, 3, 400, 400).to(device)
start_layer = 0  # retained: may be referenced later in the file

# Probe the tensor shape after the first two children of the network
# (conv1 and bn1 for a torchvision ResNet); stop early instead of
# iterating every remaining child for nothing.
for i, layer in enumerate(model.children()):
    if i >= 2:
        break
    inputs = layer(inputs)
print(inputs.shape)
