import argparse
from train import Train
from config import Config
import numpy as np
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter

from dataset.cifar10_dataset import CIFAR10_Dataset, CIFAR10_Val
from model.resnet import ResidualBlock, ResNet

# Command-line interface: every hyperparameter is supplied as a flag.
# All flags default to None; Config is expected to handle missing values.
argparser = argparse.ArgumentParser()
for flag, flag_type, description in [
    ("--num_epochs", int, "Total number of training epochs."),
    ("--num_epochs_teacher", int, "Total number of epochs for teacher net initialization."),
    ("--batch_size", int, "Batch size for training and eval."),
    ("--learning_rate", float, "The initial learning rate for Adam."),
    ("--noise_rate", float, "Noise rate in training dataset, 0 is clean."),
    ("--lnrl", int, "Whether to perform label noise representation learning."),
    ("--val_size", float, "Train dataset size for teacher net initialization."),
]:
    argparser.add_argument(flag, type=flag_type, help=description)
FLAGS, unparsed = argparser.parse_known_args()

# Device configuration: run on GPU when one is available, else CPU.
use_gpu = torch.cuda.is_available()
device = torch.device('cuda' if use_gpu else 'cpu')
config = Config(FLAGS, device)

# Image preprocessing: pad to 40x40, flip for augmentation, then crop
# back to the native CIFAR-10 size of 32x32 before tensor conversion.
augmentations = [
    transforms.Pad(4),
    transforms.RandomHorizontalFlip(),
    transforms.RandomCrop(32),
    transforms.ToTensor(),
]
transform = transforms.Compose(augmentations)
    
# CIFAR-10 dataset
print('Building noise dataset...')
data_root = 'data/'

# Raw training split: no transform here — the wrapper datasets below
# apply the augmentation pipeline themselves.
train_dataset = torchvision.datasets.CIFAR10(
    root=data_root, train=True, download=True)

# Test split only needs tensor conversion (no augmentation at eval time).
test_dataset = torchvision.datasets.CIFAR10(
    root=data_root, train=False, transform=transforms.ToTensor())

# Wrap the raw training split twice: one view with injected label noise
# for the student, one clean validation subset for the teacher net.
noise_dataset = CIFAR10_Dataset(train_dataset, config.noise_rate, transform)
val_dataset = CIFAR10_Val(train_dataset, config.val_size, transform)

# Data loader
print('Building data loader...')


def _make_loader(dataset, shuffle):
    # All three loaders share the configured batch size; only the
    # dataset and the shuffle flag differ.
    return torch.utils.data.DataLoader(
        dataset=dataset, batch_size=config.batch_size, shuffle=shuffle)


train_loader = _make_loader(noise_dataset, True)
val_loader = _make_loader(val_dataset, True)
test_loader = _make_loader(test_dataset, False)  # eval order must be stable

# Two identically-shaped ResNets: the student learns from noisy labels
# while the teacher provides guidance during filtering/replacement.
layer_spec = [2, 2, 2]
student_net = ResNet(ResidualBlock, layer_spec).to(device)
teacher_net = ResNet(ResidualBlock, layer_spec).to(device)

# Bundle the networks, loaders and hyperparameters into the training driver.
train = Train(student_net, teacher_net, train_loader,
              val_loader, test_loader, config)
# NOTE(review): a commented-out teacher-net pre-initialization step
# (gated on config.lnrl) was removed here; it called Train.train and
# eval as free functions with signatures that do not match the Train
# driver used below, so it could never run as written.

# Train the model
print('Training begin...')

# Alternative filtering strategies available on the Train driver:
#   train.train_batch_filter()
#   train.train_epoch_filter()
#   train.train_batch_filter_replace()
train.train_epoch_filter_replace()

# NOTE(review): a ~75-line commented-out legacy training/evaluation loop
# was removed here. It referenced names that do not exist in this script
# (tri_CE, criterion, student_optimizer, teacher_optimizer, writer,
# update_lr, num_epochs, noise_rate, lnrl) and is superseded by the
# Train.* methods invoked above.
