from tqdm import tqdm

from GooleNet import GooLeNet
import torch
import torch.nn as nn
from torchvision import datasets,transforms
from torch.utils.data import DataLoader

# Preprocessing: resize to GoogLeNet's 224x224 input, collapse to a single
# grayscale channel, and normalize with dataset-specific statistics.
# NOTE(review): mean/std look precomputed for this dataset — confirm against
# the actual training images if the data changes.
transform = transforms.Compose([transforms.Resize((224,224),interpolation=1),transforms.Grayscale(num_output_channels=1),
                                transforms.ToTensor(),
                                transforms.Normalize(mean=[0.2458], std=[0.0612])])
train_data = datasets.ImageFolder('./data/train',transform=transform)
# shuffle=True is essential for SGD training: with shuffle=False every epoch
# sees the classes in the same fixed (class-sorted) order, which badly hurts
# convergence. (Original code had shuffle=False here — that was a bug.)
train_loader = DataLoader(dataset=train_data,batch_size=8,shuffle=True)

test_data = datasets.ImageFolder('./data/test',transform=transform)
# Evaluation order does not matter, so the test loader stays unshuffled.
test_loader = DataLoader(dataset=test_data,batch_size=8,shuffle=False)



model = GooLeNet()
criterion = nn.CrossEntropyLoss()  # expects raw logits + integer class labels
optimizer = torch.optim.SGD(model.parameters(),lr=0.01,momentum=0.5)

def train(epoch):
    """Run one full pass over ``train_loader``, updating ``model`` in place.

    Every 10th epoch (0, 10, 20, ...) prints the average per-batch loss.

    Args:
        epoch: 0-based index of the current epoch (used only for logging).
    """
    model.train()  # enable training-mode behavior (dropout, batch-norm stats)
    loss_runtime = 0.0
    n_batches = 0
    for batch, data in enumerate(tqdm(train_loader)):
        x, y = data
        y_pred = model(x)
        loss = criterion(y_pred, y)
        # .item() detaches the scalar so the accumulator doesn't hold graphs.
        loss_runtime += loss.item()
        n_batches += 1
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # BUG FIX: the original divided the running total by the batch size
    # *inside* the loop every iteration, which repeatedly shrank the
    # accumulated sum and made the printed value meaningless. Average once,
    # over the number of batches, after the epoch completes.
    if n_batches:
        loss_runtime /= n_batches
    if epoch%10==0:
        print("after %s epochs, loss is %.8f" % (epoch + 1, loss_runtime))

if __name__ == '__main__':
    # Train for a fixed budget of 100 epochs; `train` logs every 10th one.
    total_epochs = 100
    for epoch_idx in range(total_epochs):
        train(epoch_idx)