from __future__ import print_function, division

import warnings

import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as tud
from torch.optim import lr_scheduler
from torchvision import models

import model
from dataset import CelestialDataset

# Silence library warnings (e.g. torchvision deprecation notices) for cleaner logs.
warnings.filterwarnings("ignore")

# since there are 7 data files in the training dataset, we define group as 7
GROUP = 7
# DataLoader configuration for training.
BATCH_SIZE = 100
# NOTE(review): shuffle disabled — unusual for training; presumably the data
# files are pre-shuffled. Confirm before relying on this.
SHUFFLE = False
# 0 = load batches in the main process (no worker subprocesses).
NUM_WORKERS = 0
EPOCHS = 5

if __name__ == "__main__":
    # Load GoogLeNet pre-trained on ImageNet and move it to the device
    # declared by the project's `model` module (CPU or GPU).
    model_googlenet = models.googlenet(pretrained=True)
    model_googlenet = model_googlenet.to(model.device)

    # Cross-entropy loss for multi-class classification.
    criterion = nn.CrossEntropyLoss()
    # SGD with momentum; small learning rate since we fine-tune a
    # pre-trained network rather than train from scratch.
    optimizer: optim.SGD = optim.SGD(model_googlenet.parameters(), lr=0.0005, momentum=0.9)
    # Decay the learning rate by a factor of 0.9 every 100 scheduler steps.
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.9)

    # Train sequentially on each of the GROUP data files; `model_train`
    # returns the updated model, which is carried over to the next group.
    for group_num in range(GROUP):
        celestial_data = CelestialDataset(action='train', group_num=group_num)
        # Use the module-level loader configuration. The original hard-coded
        # batch_size=10, shuffle=False, num_workers=0 here, leaving the
        # BATCH_SIZE/SHUFFLE/NUM_WORKERS constants above dead.
        training_celestial_dl = tud.DataLoader(dataset=celestial_data,
                                               batch_size=BATCH_SIZE,
                                               shuffle=SHUFFLE,
                                               num_workers=NUM_WORKERS)
        model_googlenet = model.model_train(model=model_googlenet,
                                            cri=criterion,
                                            opt=optimizer,
                                            scheduler=exp_lr_scheduler,
                                            num_epochs=EPOCHS,
                                            dl=training_celestial_dl,
                                            data_group=group_num)

    # Persist only the learned parameters (state dict), not the full module.
    torch.save(model_googlenet.state_dict(), './googlenet.pth')
