import os
import sys
import time
import torch
import numpy as np
import torchvision.utils as vutils

# Optional extra library path: set PYLIBS_DIR to a directory string to
# prepend it to sys.path. Left as None by default, so this is a no-op.
PYLIBS_DIR = None
if PYLIBS_DIR is not None:
    sys.path.insert(1, PYLIBS_DIR)

from dataloader import DepthDataLoader
from models import Pix2PixModel
from tensorboardX import SummaryWriter
from options.train_options import TrainOptions
from utils import *
import utils


class AverageMeter(object):
    """Tracks the most recent value and the running average of a scalar metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics back to zero."""
        self.val, self.sum, self.count, self.avg = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record an observation `val` that was seen `n` times."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count


def evaluate(opt, model, test_loader):
    """Run the model over `test_loader` without gradients and average depth metrics.

    Args:
        opt: parsed options; `opt.dataset` selects the de-normalization scheme.
        model: project model exposing set_input() and forward2().
        test_loader: iterable yielding dicts with 'image' and 'depth' tensors.

    Returns:
        A utils.RunningAverageDict holding the averaged error metrics
        (keys come from compute_errors, e.g. 'rmse').
    """
    print('evaluating...')
    with torch.no_grad():
        # Qualified name for consistency with the usage in main().
        avg_metric = utils.RunningAverageDict()
        for sample_batched in test_loader:
            image, depth = sample_batched['image'], sample_batched['depth']
            model.set_input(image, depth)
            pred = model.forward2()
            if opt.dataset in ['vaihingen', 'potsdam']:
                # Aerial datasets: map depth (assumed [0, 1]) and pred
                # (assumed [-1, 1]) to integer-valued [0, 255] before
                # computing errors — TODO confirm ranges against dataloader.
                real_B = torch.floor(depth * 255)
                fake_B = torch.floor((pred + 1) / 2 * 255)
            else:
                real_B = depth
                fake_B = pred
            metric = compute_errors(real_B, fake_B)
            avg_metric.update(metric)
        print('evaluation done, avg_rmse: {:.2f}'.format(avg_metric.get_value()['rmse']))
    return avg_metric


def main():
    """Train the Pix2Pix depth model, periodically evaluating and checkpointing.

    Side effects: writes TensorBoard scalars under opt.checkpoints_dir, saves
    'best'/'latest' network checkpoints, and rewrites a val_result.txt summary
    under opt.log_dir/opt.name after every evaluation.
    """
    # os.environ["CUDA_VISIBLE_DEVICES"] = '0,1'
    opt = TrainOptions().parse()
    model = Pix2PixModel(opt)
    model.setup(opt)

    gpu_nums = torch.cuda.device_count()
    print("{} GPUs!".format(gpu_nums))

    train_loader = DepthDataLoader(opt, 'train').data
    test_loader = DepthDataLoader(opt, 'online_eval').data
    writer = SummaryWriter(log_dir=opt.checkpoints_dir)

    # Sentinel: any finite first evaluation becomes the new best
    # (previously a magic 10000.0, which could miss a very bad first eval).
    best_rmse = float('inf')
    for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
        batch_time = AverageMeter()
        avg_metrics = utils.RunningAverageDict()
        iter_data_time = time.time()
        for i, sample_batched in enumerate(train_loader):
            model.train()
            # Global step used for TensorBoard and the freq-based triggers below.
            train_iteration = (epoch - 1) * len(train_loader) + i

            image, depth = sample_batched['image'], sample_batched['depth']
            model.set_input(image, depth)
            model.optimize_parameters()

            batch_time.update(time.time() - iter_data_time)
            iter_data_time = time.time()

            losses = model.get_current_losses()
            visual_ret = model.get_current_visuals()
            if opt.dataset in ['vaihingen', 'potsdam']:
                # Aerial datasets: scale visuals to integer [0, 255] before
                # computing errors (mirrors the handling in evaluate()).
                real_B = torch.floor(visual_ret['real_B'] * 255)
                fake_B = torch.floor(visual_ret['fake_B'] * 255)
            else:
                real_B = visual_ret['real_B']
                fake_B = visual_ret['fake_B']
            metric = compute_errors(real_B, fake_B)
            avg_metrics.update(metric)

            if (train_iteration + 1) % opt.display_freq == 0:
                writer.add_scalar('train/G_GAN', losses['G_GAN'], train_iteration)
                writer.add_scalar('train/G_L1', losses['G_L1'], train_iteration)
                writer.add_scalar('train/D_real', losses['D_real'], train_iteration)
                writer.add_scalar('train/D_fake', losses['D_fake'], train_iteration)
                writer.add_scalar('train/rmse', avg_metrics.get_value()['rmse'], train_iteration)
                # writer.add_image('train/Prediction', vutils.make_grid(visual_ret['fake_B']), train_iteration)
                # writer.add_image('train/Depth_GT', vutils.make_grid(depth), train_iteration)

            print('Epoch: [{0}][{1}/{2}]\t' \
                  'Time {batch_time.val:.3f} ({batch_time.sum:.3f})\t' \
                  'G_L1 {g_loss:.4f}\t' \
                  'G_GAN {g_gan:.4f}\t' \
                  'rmse {rmse:.4f}'
                  .format(epoch, i, len(train_loader), batch_time=batch_time, g_loss=losses['G_L1'],
                          g_gan=losses['G_GAN'], rmse=avg_metrics.get_value()['rmse']))

            if (train_iteration + 1) % opt.eval_freq == 0:
                model.eval()
                val_metrics = evaluate(opt, model, test_loader)
                if val_metrics.get_value()['rmse'] < best_rmse:
                    model.save_networks('best')
                    best_rmse = val_metrics.get_value()['rmse']
                writer.add_scalar('test/a1', val_metrics.get_value()['a1'], train_iteration)
                writer.add_scalar('test/a2', val_metrics.get_value()['a2'], train_iteration)
                writer.add_scalar('test/a3', val_metrics.get_value()['a3'], train_iteration)
                writer.add_scalar('test/zncc', val_metrics.get_value()['zncc'], train_iteration)
                writer.add_scalar('test/val_rmse', val_metrics.get_value()['rmse'], train_iteration)
                print('last train rmse: {:.2f}'.format(avg_metrics.get_value()['rmse']))
                print('last evaluation rmse: {:.2f}'.format(val_metrics.get_value()['rmse']))
                print()
                print('best evaluation rmse: {:.2f}'.format(best_rmse))
                # BUGFIX: filename used to be 'val_result.txt ' (trailing space),
                # which created an awkwardly-named file on disk.
                with open(os.path.join(opt.log_dir, opt.name, 'val_result.txt'), 'w') as f:
                    print('last train rmse: {:.2f}'.format(avg_metrics.get_value()['rmse']), file=f)
                    print('last evaluation rmse: {:.2f}'.format(val_metrics.get_value()['rmse']), file=f)
                    # BUGFIX: blank separator line now goes into the file
                    # (previously printed to stdout by mistake).
                    print(file=f)
                    print('best evaluation rmse: {:.2f}'.format(best_rmse), file=f)

        if epoch % opt.save_epoch_freq == 0:
            print('saving the latest model (epoch %d)' % (epoch))
            model.save_networks('latest')

        model.update_learning_rate()
    print('best evaluation rmse: {:.2f}'.format(best_rmse))


# Standard script entry point: only start training when executed directly.
if __name__ == '__main__':
    main()
