import argparse

from tqdm import tqdm
from torch.utils.tensorboard.writer import SummaryWriter

from data import *
from utils import *
from Res2Net import *
from ResNet_bottleneck import ResNetBottleneck
from ResNet10 import ResNet
from ResNet_basic_block_deeper_less_linear import resnet18
def train(args, epoch):
    """Run one full training epoch of `net` over `train_loader`.

    Uses the module-level `net`, `optimizer`, and `criterion`. Shows a tqdm
    progress bar whose postfix tracks the most recent batch loss.
    """
    net.train()
    progress = tqdm(train_loader, desc="Epoch " + str(epoch))
    for inputs, labels in progress:
        inputs = inputs.to(args.device)
        labels = labels.to(args.device)

        # standard step: clear grads, forward, backward, update
        optimizer.zero_grad()
        loss = criterion(net(inputs), labels)
        loss.backward()
        optimizer.step()

        progress.set_postfix({"loss": "%.3g" % loss.item()})


def validate(args, epoch, loss_vector, accuracy_vector):
    """Evaluate `net` on `test_loader` for one epoch.

    Appends the mean batch loss to `loss_vector` and the accuracy (percent)
    to `accuracy_vector`, logs both to TensorBoard and the logger, saves the
    model weights when this epoch is the best so far, and refreshes the
    loss/accuracy figures on disk.

    NOTE(review): gradients are only disabled because the caller wraps this
    in `torch.no_grad()` — confirm before calling it from elsewhere.
    """
    net.eval()
    val_loss, correct = 0, 0
    for index, (data, target) in enumerate(test_loader):
        data = data.to(args.device)
        target = target.to(args.device)
        output = net(data)
        # fixed: `target` was already moved to the device above — the second
        # `.to(args.device)` was redundant; also use `.item()` over the
        # deprecated `.data.item()` access.
        val_loss += criterion(output, target).item()

        pred = output.data.max(1)[1]  # get the index of the max log-probability
        correct += pred.eq(target.data).cpu().sum()

    # mean loss per batch (len(test_loader) is the number of batches)
    val_loss /= len(test_loader)
    loss_vector.append(val_loss)
    writer.add_scalar("loss/validation", val_loss, epoch)

    accuracy = 100. * correct.to(torch.float32) / len(test_loader.dataset)
    accuracy_vector.append(accuracy)
    writer.add_scalar("accuracy/validation", accuracy, epoch)

    logger.info("***** Eval results *****")
    logger.info('\nValidation set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        val_loss, correct, len(test_loader.dataset), accuracy))
    # save the best model
    if accuracy == max(accuracy_vector):
        torch.save(net.state_dict(), os.path.join(args.logdir, "best.pth"))
    # save the figure (epoch is 0-based, so epoch + 1 points have been recorded)
    save_fig(loss_vector, accuracy_vector, args.logdir, epoch + 1)


def save_fig(loss_vector, accuracy_vector, logdir, epoch):
    """Write the validation loss and accuracy curves to image files in `logdir`.

    `epoch` is the number of recorded points; both vectors are plotted
    against epochs 1..epoch. Existing files are overwritten each call.
    """
    x_axis = np.arange(1, epoch + 1)
    curves = (
        (loss_vector, 'validation loss', 'validation_loss'),
        (accuracy_vector, 'validation accuracy', 'validation_accuracy'),
    )
    for values, title, filename in curves:
        plt.figure(figsize=(5, 3))
        plt.plot(x_axis, values)
        plt.title(title)
        plt.savefig(os.path.join(logdir, filename))
    # release all figure memory between epochs
    plt.close('all')


def main(args):
    """Log the run configuration, then alternate training and validation epochs.

    Validation metrics accumulate in local lists that `validate` appends to
    and plots each epoch.
    """
    loss_vector, accuracy_vector = [], []
    # args.logdir is e.g. "./resnet18_log"; dropping the last 4 chars strips
    # the "_log" suffix, leaving the model name.
    net_name = os.path.basename(args.logdir[:-4])

    logger.info("***** Running configuration *****")
    # NOTE: examples count assumes every batch is full — the last partial
    # batch makes this a slight overestimate.
    config = (
        ("  Model Name = %s", net_name),
        ("  Num examples = %d", len(train_loader) * args.batch_size),
        ("  Num Epochs = %d", args.epochs),
        ("  Batch size = %d", args.batch_size),
        ("  Learning rate = %f", args.learning_rate),
        ("  Device = %s", args.device),
    )
    for fmt, value in config:
        logger.info(fmt, value)

    logger.info("*** Training ***")
    for epoch in range(args.epochs):
        train(args, epoch)
        with torch.no_grad():
            validate(args, epoch, loss_vector, accuracy_vector)


if __name__ == "__main__":
    # running:
    # python main.py --data_path "./data"  --batch_size 128 --device  "cuda" --learning_rate 0.01 --epochs 5 --logdir "./log"   
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_path", default="../data", type=str, help="The input data dir")
    parser.add_argument("--batch_size", default=4, type=int, help="The batch size of training")
    parser.add_argument("--device", default='cpu', type=str, help="The training device")
    parser.add_argument("--learning_rate", default=0.0004, type=float, help="learning rate")
    parser.add_argument("--epochs", default=20, type=int, help="Training epoch")
    parser.add_argument("--logdir", default="./log", type=str)
    # uncomment this line and comment the line with params if you want to run from command line
    # args = parser.parse_args()
    args = parser.parse_args(
        ['--data_path', './data', '--batch_size', '32', '--device', 'cuda', '--learning_rate', '0.0004', '--epochs',
         '40',
         '--logdir', './resnet18_log'])
    train_loader, test_loader, classes = cifar100_dataset(args)
    writer = SummaryWriter(os.path.join(args.logdir, "tensorboard"))
    # change the model here
    net = resnet18().to(args.device)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=args.learning_rate)
    if not os.path.exists(args.logdir):
        os.makedirs(args.logdir)
    logger = init_logger(args.logdir)

    lossv, accv = [], []
    index_num = 0
    main(args)

    plt.figure(figsize=(5, 3))
    plt.plot(np.arange(1, args.epochs + 1), lossv)
    plt.title('validation loss')
    plt.savefig(os.path.join(args.logdir, 'validation_loss'))

    plt.figure(figsize=(5, 3))
    plt.plot(np.arange(1, args.epochs + 1), accv)
    plt.title('validation accuracy')
    plt.savefig(os.path.join(args.logdir, 'validation_accuracy'))
