import  torch
from torch import nn, optim
from torch.nn import functional as F
import torchvision
from    torch.utils.data import DataLoader
from    torchvision import datasets
from    torchvision import transforms

batchsz = 128  # mini-batch size shared by both loaders

# CIFAR-100 training split. The images are already 32x32, so Resize is a
# no-op here; it is kept so the pipeline still works for other sizes.
cifar_train = datasets.CIFAR100('cifar100', True, transform=transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.ToTensor()
    ]), download=True)
cifar_train = DataLoader(cifar_train, batch_size=batchsz, shuffle=True)

# CIFAR-100 test split. shuffle=False: accuracy is order-independent, and a
# fixed order makes evaluation runs reproducible (original shuffled for no benefit).
cifar_test = datasets.CIFAR100('cifar100', False, transform=transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.ToTensor()
    ]), download=True)
cifar_test = DataLoader(cifar_test, batch_size=batchsz, shuffle=False)

# Sanity check: print the tensor shapes of one batch from each split.
sample = next(iter(cifar_train))
sample1 = next(iter(cifar_test))
print('train:', sample[0].shape, sample[1].shape, 'test:', sample1[0].shape, sample1[1].shape)

def one_hot(label, depth=10):
    """Convert integer class labels to a one-hot matrix.

    Args:
        label: 1-D sequence of class indices (Python list or tensor),
            each value in [0, depth).
        depth: number of classes, i.e. the width of each one-hot row.
            NOTE(review): default is 10, but this file trains on CIFAR-100 —
            pass depth=100 when using it with these labels.

    Returns:
        FloatTensor of shape [len(label), depth] with a single 1 per row.
    """
    # torch.as_tensor accepts lists and tensors alike; the legacy
    # torch.LongTensor(label) constructor used before is deprecated and
    # only worked when label was already a tensor.
    idx = torch.as_tensor(label, dtype=torch.long).view(-1, 1)
    out = torch.zeros(idx.size(0), depth)
    out.scatter_(dim=1, index=idx, value=1)
    return out

# Prefer the first GPU, but fall back to CPU so later .to(device) calls do
# not fail on machines without CUDA (the original hard-coded 'cuda:0').
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

class VGG13(nn.Module):
    """VGG13-style convolutional classifier for 32x32 RGB images.

    Five conv units (two 3x3 convs with ReLU, then a 2x2 max-pool each)
    reduce a 32x32 input to a 1x1x512 feature map, which a three-layer
    fully-connected head maps to 100 class logits.
    """

    def __init__(self):
        super(VGG13, self).__init__()
        # (in_channels, out_channels) for each conv unit; every unit halves
        # the spatial resolution, so 32 -> 16 -> 8 -> 4 -> 2 -> 1.
        channel_pairs = [(3, 64), (64, 128), (128, 256), (256, 512), (512, 512)]
        layers = []
        for c_in, c_out in channel_pairs:
            layers += [
                nn.Conv2d(c_in, c_out, kernel_size=3, stride=1, padding=1),
                nn.ReLU(),
                nn.Conv2d(c_out, c_out, kernel_size=3, stride=1, padding=1),
                nn.ReLU(),
                nn.MaxPool2d(2, stride=2),
            ]
        # Feature extractor. Module order matches a hand-written Sequential,
        # so the Sequential indices (and hence state-dict keys) are unchanged.
        self.conv_net = nn.Sequential(*layers)
        # Classifier head: 512 -> 256 -> 128 -> 100 logits.
        self.fc_net = nn.Sequential(
            nn.Linear(512, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, 100),
        )

    def forward(self, input):
        """Return [batch, 100] class logits for a [batch, 3, 32, 32] input."""
        features = self.conv_net(input)
        flat = features.view(input.shape[0], -1)
        return self.fc_net(flat)

def main():
    """Train VGG13 on CIFAR-100 for 10 epochs, printing loss and test accuracy."""
    model = VGG13().to(device)
    optimizer = optim.Adam(model.parameters(), lr=1e-4)
    # CrossEntropyLoss takes raw logits plus integer class labels.
    criteon = nn.CrossEntropyLoss().to(device)

    for epoch in range(10):
        # ---- training pass ----
        model.train()
        for step, (x, y) in enumerate(cifar_train):
            x, y = x.to(device), y.to(device)
            logits = model(x)
            loss = criteon(logits, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if step % 100 == 0:
                print('epoch:', epoch, 'step:', step, 'loss:', loss.item())

        # ---- evaluation pass ----
        # eval() + no_grad(): correct layer behavior (matters if dropout/BN
        # are ever added) and no autograd bookkeeping, which the original
        # wastefully kept during evaluation.
        model.eval()
        total_correct = 0
        with torch.no_grad():
            for x, y in cifar_test:
                x, y = x.to(device), y.to(device)
                # [b, 100] logits
                logits = model(x)
                # [b] predicted class per sample
                pred = logits.argmax(dim=1)
                total_correct += pred.eq(y).sum().item()

        acc = total_correct / len(cifar_test.dataset)
        print('epoch:', epoch, 'accuracy:', acc)


if __name__ == '__main__':
    main()
