'''
@Author: JintuZheng 郑晋图
@Code: Trainer for mnist
'''
import os

import torch
import torch.nn as nn
import torch.nn.functional as F
from matplotlib import pyplot as plt
from torch.autograd import Variable
from torch.utils.data import random_split, DataLoader
from tqdm import trange

from model import R34
from ours_dataloader import MnistDataset
from utils import build_loss_fig


# ---- Hyperparameters ----
batch_size = 100  # samples per mini-batch (fixed typo: was `batch_szie`)
lr = 3e-4         # Adam learning rate
epoches = 5       # number of passes over the training split

# Build the dataset and hold out 30% of it for validation.
whole_dataset = MnistDataset(root='Dataset/mnist_train/')  # Make dataset
train_size = int(0.70 * len(whole_dataset))  # 70/30 train/val split
train_bag, val_bag = random_split(whole_dataset, [train_size, len(whole_dataset) - train_size])  # Cross validation

train_loader = DataLoader(train_bag, batch_size=batch_size, shuffle=True, num_workers=0)
val_loader = DataLoader(val_bag, batch_size=batch_size, shuffle=True, num_workers=0)

# Prefer the first CUDA device when available, otherwise fall back to CPU.
device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
model = R34(10).to(device)  # build model: 10 output classes (MNIST digits), moved to `device`
optim = torch.optim.Adam(model.parameters(), lr=lr)  # Build optim

train_losses = []  # per-iteration training losses (for the loss figure)
val_losses = []    # per-iteration validation losses (for the loss figure)

# Main training loop: one pass over `train_loader` per epoch, with a full
# validation sweep every 10 training iterations.
for epoch in trange(epoches):

    for index, (img, label) in enumerate(train_loader):
        # (Re)enable training mode every iteration — the validation branch
        # below switches the model to eval mode.
        model.train()
        img, label = img.to(device), label.to(device)
        optim.zero_grad()  # clear grad accumulated by the previous step

        pred = model(img)
        # NOTE(review): binary_cross_entropy_with_logits expects `label` to be
        # a float tensor with the same shape as `pred` (e.g. one-hot); for
        # integer class indices F.cross_entropy is the usual choice for 10-way
        # classification — confirm against what MnistDataset yields.
        loss = F.binary_cross_entropy_with_logits(pred, label)
        loss.backward()
        optim.step()
        iter_loss = loss.item()
        print('==>>Training: epoch id:{} index:{}/{}, loss:{}'.format(epoch,index,len(train_loader),iter_loss))
        train_losses.append(iter_loss)

        # Every 10 training iterations (skipping iteration 0) evaluate on the
        # whole validation split without tracking gradients.
        if index % 10 == 0 and index != 0:
            model.eval()
            with torch.no_grad():
                # Distinct names so the outer loop's `index`/`img`/`label`
                # are not shadowed (was a latent-bug hazard).
                for val_index, (val_img, val_label) in enumerate(val_loader):
                    val_img, val_label = val_img.to(device), val_label.to(device)
                    val_pred = model(val_img)
                    val_loss = F.binary_cross_entropy_with_logits(val_pred, val_label)
                    iter_loss = val_loss.item()
                    print('==>>Evaluating: epoch id:{} == index:{}/{} == loss:{}'.format(epoch, val_index,len(val_loader),iter_loss))
                    val_losses.append(iter_loss)


# Render the recorded loss curves.
build_loss_fig(train_losses, 'Train Loss') # build loss figure
build_loss_fig(val_losses, 'Val Loss') # build loss figure

# Ensure the checkpoint directory exists — torch.save does not create it
# and would otherwise raise FileNotFoundError.
os.makedirs('weights', exist_ok=True)
torch.save(model.state_dict(), 'weights/resnet34_mnist_{}.pth'.format(epoches))
print('Over training====>')