import os
import logging
import time
import matplotlib.pyplot as plt
import torch
from dataloder import mydata
from model import create_model
from utils import image_restore, label2names
from torch import optim
from torch import nn
from torchvision.transforms import Normalize
from arguments import my_args


# Create the output directories for logs and model checkpoints.
# os.makedirs(..., exist_ok=True) replaces the try/except FileExistsError dance.
os.makedirs('./logfile', exist_ok=True)
os.makedirs('./model_output', exist_ok=True)
# Log everything (DEBUG and up) to a fresh file each run (filemode="w" truncates).
logging.basicConfig(level=logging.DEBUG, datefmt="%m-%d %H:%M", filename="./logfile/cifar10.log", filemode="w")

# ---- hyper-parameters, model and optimization setup ----
args = my_args()
device, Epoch, learning_rate, step_size, gamma =\
    args.device, args.Epoch, args.learning_rate, args.step_size, args.gamma
model = create_model().to(device)
custom_data_loader = mydata()  # dict including 'train', 'valid'
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
# Decay the learning rate by `gamma` every `step_size` epochs.
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
criterion = nn.CrossEntropyLoss()
# (A commented-out sample-visualization snippet using matplotlib and
# label2names/image_restore used to live here; removed as dead code.)

# Start below any reachable accuracy so the first epoch always checkpoints.
best_accuracy = -0.1
# Per-image channel normalization (ImageNet mean/std), applied before the forward pass.
norm = Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
if __name__ == "__main__":
    for epoch in range(Epoch):
        start = time.time()

        # ---- training pass ----
        model.train()
        for batch_idx, batch in enumerate(custom_data_loader['train']):
            images = batch[0]
            # BUG FIX: the original looped over range(len(batch)) — batch is the
            # (images, labels) pair, so len(batch) == 2 and only the first two
            # images of every batch were normalized. Normalize every image.
            for i in range(len(images)):
                images[i] = norm(images[i])
            images = images.to(device)
            labels = batch[1].to(device)
            outputs = model(images)
            loss = criterion(outputs, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            print(loss.item())
        end = time.time()
        logging.info('No.{} time: {} min'.format(epoch, (end-start)/60))
        scheduler.step()

        # ---- validation pass ----
        true_count, total = 0, 0
        model.eval()
        # no_grad: no autograd graph is needed for evaluation (saves memory/time).
        with torch.no_grad():
            for batch_idx, batch in enumerate(custom_data_loader['valid']):
                images = batch[0]
                # Same fix as above: iterate the images, not the 2-element batch.
                for i in range(len(images)):
                    images[i] = norm(images[i])
                images = images.to(device)
                labels = batch[1]
                outputs = model(images)
                # Predicted class = argmax over the class dimension; replaces the
                # manual Python-list index-of-max search.
                preds = outputs.argmax(dim=1).cpu()
                true_count += torch.sum(labels == preds).item()
                total += len(images)
        accuracy = true_count/total
        logging.info('No.{} validation accuracy: {}'.format(epoch, accuracy))

        # Checkpoint whenever validation accuracy improves.
        if accuracy > best_accuracy:
            best_accuracy = accuracy
            logging.info('current best accuracy of model No.{}: {}'.format(epoch, best_accuracy))
            state = {'net': model.state_dict(),
                     'optimizer': optimizer.state_dict(),
                     'epoch': epoch}
            torch.save(state, os.path.join(
                os.path.abspath("./model_output"), 'm' + str(epoch))
                       )






















