from os import makedirs, system
from os.path import join, exists, dirname, abspath
from tool import ConfigSensatUrban as cfg

import torch
import numpy as np
import torch.optim as optim
import time, pickle, argparse, glob, os, shutil
from datetime import datetime
from dataset_SensatUrban import SensatUrban
from torch.utils.data import DataLoader
from RandLANet import Network, compute_loss, compute_acc, IoUCalculator

# parser = argparse.ArgumentParser()
# parser.add_argument('--checkpoint_path', default='output/checkpoint.tar', help='Model checkpoint path [default: None]')
# parser.add_argument('--log_dir', default='output', help='Dump dir to save model checkpoint [default: log]')
# parser.add_argument('--max_epoch', type=int, default=400, help='Epoch to run [default: 180]')
# parser.add_argument('--batch_size', type=int, default=20, help='Batch Size during training [default: 8]')
# FLAGS = parser.parse_args()


LOG_DIR = 'output'
CHECKPOINT_PATH = 'output/checkpoint.tar'
PYPLOT_PATH = 'output/plot.tar'

# Per-split history of logged statistics; periodically dumped with torch.save
# so training curves can be plotted after (or during) a run.
plot_data = {'train': {'acc': [], 'loss': [], 'miou': []},
             'val': {'acc': [], 'loss': [], 'miou': []}}


#################################################   log   #################################################
# makedirs(..., exist_ok=True) replaces the exists()/mkdir() pair: it has no
# check-then-create race and also handles a nested LOG_DIR path.
os.makedirs(LOG_DIR, exist_ok=True)
# Append mode so resumed runs keep extending the same log file.
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'a')


def log_string(out_str, end='\n'):
    """Write *out_str* to the shared training log file and echo it to stdout.

    Args:
        out_str: text to log.
        end: line terminator, pass '' to continue the same line.
    """
    message = out_str + end
    LOG_FOUT.write(message)
    # Flush immediately so the log stays complete even if the run crashes.
    LOG_FOUT.flush()
    print(out_str, end=end)


#################################################   dataset   #################################################
# Init datasets and dataloaders
def my_worker_init_fn(worker_id):
    """Give each DataLoader worker a distinct, reproducible numpy seed.

    Derives the seed from the parent process's current numpy RNG state,
    offset by the worker index, so workers don't all sample identically.
    """
    base_seed = np.random.get_state()[1][0]
    np.random.seed(base_seed + worker_id)


# Create Dataset and Dataloader
# Create Dataset and Dataloader
# SensatUrban takes the split name; lengths below are in sampled sub-clouds.
TRAIN_DATASET = SensatUrban('training')
VAL_DATASET = SensatUrban('validation')
TEST_DATASET = SensatUrban('test')
print("TRAIN_DATASET_LEN:{0}".format(len(TRAIN_DATASET)))
print("VAL_DATASET_LEN:{0}".format(len(VAL_DATASET)))
print("TEST_DATASET_LEN:{0}".format(len(TEST_DATASET)))

# Each dataset supplies its own collate_fn (batches are nested dicts/lists of
# tensors, not plain stacks).  NOTE(review): shuffle is not set, so iteration
# order is the dataset's own sampling order — confirm that is intended.
TRAIN_DATALOADER = DataLoader(TRAIN_DATASET, batch_size=cfg.batch_size, num_workers=20,
                              worker_init_fn=my_worker_init_fn, collate_fn=TRAIN_DATASET.collate_fn)
VAL_DATALOADER = DataLoader(VAL_DATASET, batch_size=cfg.val_batch_size, num_workers=20,
                            worker_init_fn=my_worker_init_fn, collate_fn=VAL_DATASET.collate_fn)
TEST_DATALOADER = DataLoader(TEST_DATASET, batch_size=cfg.val_batch_size, num_workers=20,
                             worker_init_fn=my_worker_init_fn, collate_fn=TEST_DATASET.collate_fn)

print("TRAIN_DATALOADER_LEN:{0}".format(len(TRAIN_DATALOADER)))
print("VAL_DATALOADER_LEN:{0}".format(len(VAL_DATALOADER)))
print("TEST_DATALOADER_LEN:{0}".format(len(TEST_DATALOADER)))

# TRAIN_DATALOADER = DataLoader(TRAIN_DATASET, batch_size=cfg.batch_size, shuffle=True, num_workers=20, worker_init_fn=my_worker_init_fn, collate_fn=TRAIN_DATASET.collate_fn)
# TEST_DATALOADER = DataLoader(TEST_DATASET, batch_size=cfg.batch_size, shuffle=True, num_workers=20, worker_init_fn=my_worker_init_fn, collate_fn=TEST_DATASET.collate_fn)

# print(len(TRAIN_DATALOADER), len(TEST_DATALOADER))


#################################################   network   #################################################
#################################################   network   #################################################
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("device={0}".format(device))

net = Network(cfg)
net.to(device)

# Load the Adam optimizer
optimizer = optim.Adam(net.parameters(), lr=cfg.learning_rate)

# Load checkpoint if there is any
it = -1  # for the initialize value of `LambdaLR` and `BNMomentumScheduler`
start_epoch = 0
# CHECKPOINT_PATH = FLAGS.checkpoint_path
if CHECKPOINT_PATH is not None and os.path.isfile(CHECKPOINT_PATH):
    # map_location=device lets a checkpoint saved on GPU be restored on a
    # CPU-only host (plain torch.load would fail trying to deserialize to CUDA).
    checkpoint = torch.load(CHECKPOINT_PATH, map_location=device)
    net.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    start_epoch = checkpoint['epoch']
    log_string("-> loaded checkpoint %s (epoch: %d)" % (CHECKPOINT_PATH, start_epoch))

if torch.cuda.device_count() > 1:
    log_string("Let's use %d GPUs!" % (torch.cuda.device_count()))
    # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
    net = torch.nn.DataParallel(net)


#################################################   training functions   ###########################################


def adjust_learning_rate(optimizer, epoch):
    """Decay every param group's learning rate by cfg.lr_decays[epoch].

    The current rate is read from the first param group (all groups share
    one rate here) and the decayed value is written back to all of them.
    """
    decayed_lr = optimizer.param_groups[0]['lr'] * cfg.lr_decays[epoch]
    for group in optimizer.param_groups:
        group['lr'] = decayed_lr


def train_one_epoch():
    """Train the network for one pass over TRAIN_DATALOADER.

    Uses module-level globals: `net`, `optimizer`, `device`, `EPOCH_CNT`,
    `plot_data`.  Running means of every loss/acc/iou statistic are logged
    every 10 batches; the epoch's mean IoU is appended to
    plot_data['train']['miou'].
    """
    stat_dict = {}  # running sums of each stat since the last log line
    adjust_learning_rate(optimizer, EPOCH_CNT)
    net.train()  # set model to training mode
    iou_calc = IoUCalculator(cfg)
    for batch_idx, batch_data in enumerate(TRAIN_DATALOADER):
        # Move every tensor (or list of tensors) to the training device.
        # `.to(device)` instead of `.cuda()` keeps this runnable on CPU too.
        for key in batch_data:
            if type(batch_data[key]) is list:
                for i in range(len(batch_data[key])):
                    batch_data[key][i] = batch_data[key][i].to(device)
            else:
                batch_data[key] = batch_data[key].to(device)

        # Forward pass + backprop + optimizer step
        optimizer.zero_grad()
        end_points = net(batch_data)

        loss, end_points = compute_loss(end_points, cfg)
        loss.backward()
        optimizer.step()

        acc, end_points = compute_acc(end_points)
        iou_calc.add_data(end_points)

        # Accumulate statistics and print out
        for key in end_points:
            if 'loss' in key or 'acc' in key or 'iou' in key:
                if key not in stat_dict:
                    stat_dict[key] = 0
                stat_dict[key] += end_points[key].item()

        batch_interval = 10
        if (batch_idx + 1) % batch_interval == 0:
            log_string(' ---- batch: %03d ----' % (batch_idx + 1), end='')
            for key in sorted(stat_dict.keys()):
                value = stat_dict[key] / batch_interval
                log_string('mean %s: %f  ' % (key, value), end='')
                # setdefault guards against stat keys that plot_data did not
                # pre-declare (it only starts with 'acc'/'loss'/'miou').
                plot_data['train'].setdefault(key, []).append(value)
                stat_dict[key] = 0
            log_string('')
    mean_iou, iou_list = iou_calc.compute_iou()
    plot_data['train']['miou'] += [mean_iou]
    log_string('mean IoU:{:.1f}'.format(mean_iou * 100))
    s = 'IoU:'
    for iou_tmp in iou_list:
        s += '{:5.2f} '.format(100 * iou_tmp)
    log_string(s)


def evaluate_one_epoch():
    """Evaluate the network over VAL_DATALOADER, logging mean stats and IoU.

    Uses module-level globals `net`, `device`, `plot_data`.  Per-batch
    loss/acc/iou stats are averaged over the whole loader and appended to
    plot_data['val'].
    """
    stat_dict = {}  # running sums of each loss/acc/iou statistic
    net.eval()  # set model to eval mode (for bn and dp)
    iou_calc = IoUCalculator(cfg)
    num_batches = 0  # counted explicitly so an empty loader cannot NameError
    for batch_idx, batch_data in enumerate(VAL_DATALOADER):
        # Move tensors (or lists of tensors) to the evaluation device.
        for key in batch_data:
            if type(batch_data[key]) is list:
                for i in range(len(batch_data[key])):
                    batch_data[key][i] = batch_data[key][i].to(device)
            else:
                batch_data[key] = batch_data[key].to(device)

        # Forward pass; loss/acc are computed under no_grad as well so no
        # autograd graph is built during evaluation (same numbers, less memory).
        with torch.no_grad():
            end_points = net(batch_data)
            loss, end_points = compute_loss(end_points, cfg)
            acc, end_points = compute_acc(end_points)
        iou_calc.add_data(end_points)

        # Accumulate statistics
        for key in end_points:
            if 'loss' in key or 'acc' in key or 'iou' in key:
                if key not in stat_dict:
                    stat_dict[key] = 0
                stat_dict[key] += end_points[key].item()
        num_batches += 1

    for key in sorted(stat_dict.keys()):
        value = stat_dict[key] / float(num_batches)
        log_string('eval mean %s: %f  ' % (key, value), end='')
        # setdefault guards against stat keys not pre-declared in plot_data
        plot_data['val'].setdefault(key, []).append(value)
    log_string('')
    mean_iou, iou_list = iou_calc.compute_iou()
    plot_data['val']['miou'] += [mean_iou]
    log_string('mean IoU:{:.1f}'.format(mean_iou * 100))
    s = 'IoU:'
    for iou_tmp in iou_list:
        s += '{:5.2f} '.format(100 * iou_tmp)
    log_string(s)


def train(start_epoch):
    """Main training loop: train each epoch, periodically evaluate, and
    checkpoint model/optimizer state plus the plotting history.

    Args:
        start_epoch: epoch index to resume from (0 for a fresh run).
    """
    global EPOCH_CNT
    loss = 0  # placeholder stored in the checkpoint (never updated here)
    for epoch in range(start_epoch, cfg.max_epoch):
        EPOCH_CNT = epoch
        log_string('**** EPOCH %03d ****' % (epoch))

        log_string(str(datetime.now()))

        # Re-seed numpy so sampling differs between epochs and after resumes.
        np.random.seed()
        train_one_epoch()

        if EPOCH_CNT == 0 or EPOCH_CNT % 5 == 4:  # eval on epoch 0, then every 5 epochs
            log_string('**** EVAL EPOCH %03d START****' % (epoch))
            evaluate_one_epoch()
            log_string('**** EVAL EPOCH %03d END****' % (epoch))
        # Save checkpoint
        save_dict = {'epoch': epoch + 1,  # after training one epoch, the start_epoch should be epoch+1
                     'optimizer_state_dict': optimizer.state_dict(),
                     'loss': loss,
                     }
        # With nn.DataParallel the real model lives in `.module`; unwrap it so
        # the checkpoint also loads into a bare Network (replaces the old
        # bare `except:` which silently swallowed unrelated errors).
        model = net.module if isinstance(net, torch.nn.DataParallel) else net
        save_dict['model_state_dict'] = model.state_dict()
        torch.save(save_dict, os.path.join(LOG_DIR, 'checkpoint.tar'))
        torch.save(plot_data, PYPLOT_PATH)


def test(num_votes=100):
    """Voting-based inference over TEST_DATALOADER.

    Repeatedly samples the test clouds, smoothing per-point class
    probabilities, then reprojects them to the full clouds and writes one
    .label file per cloud (plus a zipped submission) under test/.

    Args:
        num_votes: stop once the dataset's minimum possibility score has
            been pushed past this many voting rounds.
    """
    # Smoothing parameter for votes (exponential moving average weight)
    test_smooth = 0.95

    # Test saving path
    saving_path = time.strftime('results/Log_%Y-%m-%d_%H-%M-%S', time.gmtime())
    test_path = join('test', saving_path.split('/')[-1])
    makedirs(test_path, exist_ok=True)
    makedirs(join(test_path, 'test_preds'), exist_ok=True)

    step_id = 0
    epoch_id = 0
    last_min = -0.5

    # One running probability buffer per test cloud
    test_probs = [np.zeros(shape=[l.shape[0], cfg.num_classes], dtype=np.float32)
                  for l in TEST_DATASET.input_labels]

    while last_min < num_votes:
        for batch_idx, batch_data in enumerate(TEST_DATALOADER):
            # Move tensors (or lists of tensors) to the inference device.
            for key in batch_data:
                if type(batch_data[key]) is list:
                    for i in range(len(batch_data[key])):
                        batch_data[key][i] = batch_data[key][i].to(device)
                else:
                    batch_data[key] = batch_data[key].to(device)

            # Forward pass
            with torch.no_grad():
                end_points = net(batch_data)

            logits = end_points['logits'].transpose(1, 2).reshape(-1, cfg.num_classes)

            # BUG FIX: torch.nn.Softmax(logits, dim=-1) constructed a Softmax
            # *module* (never applying it); use the functional form, and bring
            # everything to CPU/NumPy before mixing with the numpy buffers.
            stacked_probs = torch.nn.functional.softmax(logits, dim=-1).cpu().numpy()
            # NOTE(review): assumes these end_points entries are torch tensors
            # (inputs were moved to `device` above) — confirm against Network.
            stacked_labels = np.reshape(end_points['labels'].cpu().numpy(), [-1])
            point_idx = end_points['input_inds'].cpu().numpy()
            cloud_idx = end_points['cloud_inds'].cpu().numpy()

            stacked_probs = np.reshape(stacked_probs, [cfg.val_batch_size, cfg.num_points,
                                                       cfg.num_classes])

            # Blend this vote into each cloud's running probabilities
            for j in range(np.shape(stacked_probs)[0]):
                probs = stacked_probs[j, :, :]
                p_idx = point_idx[j, :]
                c_i = cloud_idx[j][0]
                test_probs[c_i][p_idx] = test_smooth * test_probs[c_i][p_idx] + (1 - test_smooth) * probs
            step_id += 1

        new_min = np.min(TEST_DATASET.min_possibility)
        log_string('Epoch {:3d}, end. Min possibility = {:.1f}'.format(epoch_id, new_min))

        if last_min + 1 < new_min:

            # Update last_min
            last_min += 1

            # Show vote results (On subcloud so it is not the good values here)
            log_string('\nConfusion on sub clouds')
            num_test = len(TEST_DATASET.input_labels)

            # Project predictions
            log_string('\nReproject Vote #{:d}'.format(int(np.floor(new_min))))
            proj_probs_list = []

            for i_test in range(num_test):
                # Reproject probs back to the evaluations points
                proj_idx = TEST_DATASET.test_proj[i_test]
                probs = test_probs[i_test][proj_idx, :]
                proj_probs_list += [probs]

            # Show vote results
            log_string('Confusion on full clouds')
            for i_test in range(num_test):
                # Get the predicted labels (already uint8 after astype)
                preds = TEST_DATASET.label_values[np.argmax(proj_probs_list[i_test], axis=1)].astype(np.uint8)
                save_name = join(test_path, 'test_preds', TEST_DATASET.input_names[i_test] + '.label')
                preds.tofile(save_name)

            # Create submission zip. Paths are generated internally (timestamps),
            # so the shell command does not carry untrusted input.
            base_dir = dirname(abspath(__file__))
            results_path = join(base_dir, test_path, 'test_preds')
            system('cd %s && zip -r %s/submission.zip *.label' % (results_path, results_path))

        epoch_id += 1
        step_id = 0


if __name__ == '__main__':
    # Entry point: resume from `start_epoch`, which the checkpoint-loading
    # section above sets (0 when no checkpoint was found).
    train(start_epoch)
