import torch,torchvision
from torch import nn
from torch.utils.data import DataLoader
# CIFAR-10 training split, upscaled from its native 32x32 to the 227x227
# input size the AlexNet-style network below expects.
cifar_transform = torchvision.transforms.Compose([
    torchvision.transforms.Resize(227),
    torchvision.transforms.ToTensor(),
])
train_data = torchvision.datasets.CIFAR10(
    root='./data',
    train=True,
    transform=cifar_transform,
    download=True,
)
dataloader = DataLoader(train_data, batch_size=64)

class my_alexnet(nn.Module):
    """AlexNet-style CNN producing 10-class logits for 227x227 RGB input.

    The entire network -- convolutional feature extractor plus the fully
    connected classifier head -- lives in a single ``nn.Sequential`` stored
    as ``self.feature``, so checkpoint/state_dict keys keep the original
    ``feature.<index>`` layout.
    """

    def __init__(self):
        super().__init__()
        layers = [
            # Stage 1: 3x227x227 -> conv 11x11 stride 4 -> 3x3 max-pool
            nn.Conv2d(3, 96, 11, 4, 1),
            nn.ReLU(),
            nn.MaxPool2d(3, 2),
            # Stage 2: 5x5 conv with padding 2, then pool
            nn.Conv2d(96, 256, 5, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(3, 2),
            # Stage 3: three 3x3 convs back to back, then pool
            nn.Conv2d(256, 384, 3, padding=1),
            nn.ReLU(),
            nn.Conv2d(384, 384, 3, padding=1),
            nn.ReLU(),
            nn.Conv2d(384, 256, 3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(3, 2),
            # Classifier head: flatten 256x6x6 = 9216 features -> 10 logits
            nn.Flatten(),
            nn.Linear(9216, 4096),
            nn.ReLU(),
            nn.Dropout(p=0.5),
            nn.Linear(4096, 4096),
            nn.ReLU(),
            nn.Dropout(p=0.5),
            nn.Linear(4096, 10),
        ]
        self.feature = nn.Sequential(*layers)

    def forward(self, x):
        """Run the full network; returns raw class logits of shape (N, 10)."""
        return self.feature(x)

# --- training setup ---
alexnet = my_alexnet()
loss_fn = nn.CrossEntropyLoss()
optim = torch.optim.SGD(alexnet.parameters(), lr=1e-2)

EPOCHS = 30  # fixed typo: was `enpoch`

# --- training loop ---
alexnet.train()  # enable dropout for training
for epoch in range(EPOCHS):
    running_loss = 0.0
    num_batches = 0
    # fixed typo: `lable` -> `label`; unpack directly in the for statement
    for img, label in dataloader:
        optim.zero_grad()
        logits = alexnet(img)
        loss = loss_fn(logits, label)
        loss.backward()
        optim.step()
        running_loss += loss.item()
        num_batches += 1
        print(loss.item())
    # Epoch-level summary so 30 epochs of per-batch prints stay monitorable;
    # max(..., 1) guards against an empty dataloader.
    print(f"epoch {epoch + 1}/{EPOCHS}: mean loss {running_loss / max(num_batches, 1):.4f}")





