from src.dataset import CelebaAttDataset
import torch
import torch.utils.data
from torch import nn
import numpy as np
from torch.nn import functional as F
from src import const
from src.utils import parse_args_and_merge_const, Evaluator
from tensorboardX import SummaryWriter
import os
import pandas as pd

def _to_device(sample):
    """Move every tensor in the batch dict *sample* onto const.device, in place.

    Returns the same dict for call-site convenience.
    """
    for key in sample:
        sample[key] = sample[key].to(const.device)
    return sample


def _evaluate(net, dataloader, label_name):
    """Run *net* over *dataloader* without gradients and return the metric dict.

    Accumulates (sample, output) pairs into an Evaluator batch by batch and
    returns Evaluator.evaluate()'s result (a mapping of metric name -> value).
    """
    evaluator = Evaluator(label_name)
    with torch.no_grad():
        for sample in dataloader:
            sample = _to_device(sample)
            output = net(sample)
            evaluator.add(sample, output)
    return evaluator.evaluate()


if __name__ == '__main__':
    parse_args_and_merge_const()
    # exist_ok avoids the check-then-create race of exists() + makedirs().
    os.makedirs('models', exist_ok=True)

    # The annotation table carries a precomputed train/val/test split in `status`.
    df = pd.read_csv('./data/info.csv', index_col='image_name')
    train_df = df[df.status == 'train']
    val_df = df[df.status == 'val']
    test_df = df[df.status == 'test']
    train_dataset = CelebaAttDataset(train_df, mode=const.TRAIN_DATASET_MODE)
    train_dataloader = torch.utils.data.DataLoader(
        train_dataset, batch_size=const.BATCH_SIZE, shuffle=True, num_workers=4)
    val_dataset = CelebaAttDataset(val_df, mode=const.VAL_TEST_DATASET_MODE)
    val_dataloader = torch.utils.data.DataLoader(
        val_dataset, batch_size=const.VAL_TEST_BATCH_SIZE, shuffle=False, num_workers=4)
    test_dataset = CelebaAttDataset(test_df, mode=const.VAL_TEST_DATASET_MODE)
    test_dataloader = torch.utils.data.DataLoader(
        test_dataset, batch_size=const.VAL_TEST_BATCH_SIZE, shuffle=False, num_workers=4)

    net = const.USE_NET(train_dataset.label_name)
    net = net.to(const.device)

    learning_rate = const.LEARNING_RATE
    optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)

    writer = SummaryWriter(const.TRAIN_DIR)

    total_step = len(train_dataloader)
    step = 0
    for epoch in range(const.NUM_EPOCH):
        net.train()
        for i, sample in enumerate(train_dataloader):
            step += 1
            sample = _to_device(sample)
            output = net(sample)
            # cal_loss returns a dict of loss terms; 'all' is the combined loss.
            loss = net.cal_loss(sample, output)

            optimizer.zero_grad()
            loss['all'].backward()
            optimizer.step()

            if (i + 1) % 10 == 0:
                writer.add_scalar('loss', loss['all'].item(), step)
                writer.add_scalar('learning_rate', learning_rate, step)
                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                      .format(epoch + 1, const.NUM_EPOCH, i + 1, total_step, loss['all'].item()))

        # Checkpoint and evaluate at the end of every epoch.
        # (The original `(epoch + 1) % 1 == 0` guard was always true.)
        print('Saving Model....')
        net.set_buffer('step', step)
        torch.save(net.state_dict(), 'models/' + const.MODEL_NAME)
        print('OK. Now evaluate..')

        net.eval()  # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
        ret = _evaluate(net, val_dataloader, train_dataset.label_name)
        for key in ret:
            print('val', key, ret[key])
            writer.add_scalar('val/' + key, ret[key], step)
        ret = _evaluate(net, test_dataloader, train_dataset.label_name)
        for key in ret:
            print(key, ret[key])
            writer.add_scalar('test_metrics/' + key, ret[key], step)

        # Decay the learning rate in place. Rebuilding a fresh Adam optimizer
        # each epoch (as before) silently discarded Adam's first/second moment
        # estimates; updating param_groups keeps the optimizer state intact.
        learning_rate *= const.LEARNING_RATE_DECAY
        for group in optimizer.param_groups:
            group['lr'] = learning_rate
