import os
import time
import random

import jittor as jt
import jittor.nn as nn

from mit_utils import cfg
from mit_utils import TrainDataset, DataLoader
from mit_utils import AverageMeter, setup_logger

from model import EncoderDecoder


# train one epoch
def train(segmentation_module, iterator, optimizer, history, epoch, cfg, num_gpus=1):
    """Run one epoch of training.

    Args:
        segmentation_module: model whose forward returns (loss, acc) for a batch.
        iterator: infinite iterator over the training DataLoader.
        optimizer: Jittor optimizer; ``step(loss)`` does backward + update.
        history: dict with history['train']['epoch'/'loss'/'acc'] lists, appended in place.
        epoch: 1-based epoch index (used for the global iteration counter).
        cfg: config node; reads cfg.TRAIN.epoch_iters / disp_iter / running_lr.
        num_gpus: with a single GPU the loader wraps each batch in a
            one-element list, which is unwrapped here.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    ave_total_loss = AverageMeter()
    ave_acc = AverageMeter()

    segmentation_module.train()

    # main loop
    for i in range(cfg.TRAIN.epoch_iters):
        tic = time.time()
        # load a batch of data
        batch_data = next(iterator)

        # changes in case of only 1 GPU
        if num_gpus == 1:
            batch_data = batch_data[0]

        toc = time.time()
        data_time.update(toc - tic)

        # adjust learning rate (poly decay on the global iteration counter)
        cur_iter = i + (epoch - 1) * cfg.TRAIN.epoch_iters
        adjust_learning_rate(optimizer, cur_iter, cfg)

        # forward pass
        loss, acc = segmentation_module(batch_data)
        loss = loss.mean()
        acc = acc.mean()

        # backward + update: Jittor's optimizer.step(loss) performs both
        optimizer.step(loss)

        # measure elapsed time for forward/backward
        batch_time.update(time.time() - toc)

        # update average loss and acc (convert to Python scalars once)
        loss_val = loss.numpy().tolist()[0]
        acc_val = acc.numpy().tolist()[0]

        ave_total_loss.update(loss_val)
        ave_acc.update(acc_val * 100)

        # display progress and log it
        if i % cfg.TRAIN.disp_iter == 0:
            msg = ('Epoch: [{}][{}/{}], Time: {:.2f}, Data: {:.2f}, '
                   'lr_encoder: {:.6f}, lr_decoder: {:.6f}, '
                   'Accuracy: {:4.2f}, Loss: {:.6f}'
                   .format(epoch, i, cfg.TRAIN.epoch_iters,
                           batch_time.average(), data_time.average(),
                           cfg.TRAIN.running_lr, cfg.TRAIN.running_lr,
                           ave_acc.average(), ave_total_loss.average()))
            print(msg)
            # NOTE(review): `log_file` is a module-level global set only under
            # __main__ -- calling train() from an importing module would raise
            # NameError; consider passing the path in explicitly.
            with open(log_file, "a", encoding="utf-8") as f:
                f.write(msg)
                f.write('\n')

            fractional_epoch = epoch - 1 + 1. * i / cfg.TRAIN.epoch_iters
            history['train']['epoch'].append(fractional_epoch)
            history['train']['loss'].append(loss_val)
            history['train']['acc'].append(acc_val)

@jt.single_process_scope()
def checkpoint(net, history, cfg, epoch):
    """Save the training history and model weights for *epoch* under cfg.DIR."""
    print('Saving checkpoints...')
    history_path = '{}/history_epoch_{}.pkl'.format(cfg.DIR, epoch)
    weights_path = '{}/epoch_{}.pkl'.format(cfg.DIR, epoch)
    jt.save(history, history_path)
    jt.save(net.state_dict(), weights_path)


def adjust_learning_rate(optimizer, cur_iter, cfg):
    """Apply polynomial ("poly") learning-rate decay for the current iteration.

    The new rate is lr_encoder * (1 - cur_iter / max_iters) ** lr_pow;
    it is stored on cfg.TRAIN.running_lr and pushed onto the optimizer.
    """
    progress = float(cur_iter) / cfg.TRAIN.max_iters
    decay = (1.0 - progress) ** cfg.TRAIN.lr_pow
    cfg.TRAIN.running_lr = cfg.TRAIN.lr_encoder * decay
    optimizer.lr = cfg.TRAIN.running_lr


def main(cfg):
    """Build the model, data pipeline and optimizer, then run the training loop."""
    # TODO: point this at your own pretrained backbone weights
    pretrained = "/data/share/leixy/JSeg/resnet101_v1c-e67eebb6.pth"
    segmentation_module = EncoderDecoder(pretrained=pretrained)

    # Dataset: already yields full batches, hence loader batch_size=1 below
    dataset_train = TrainDataset(
        cfg.DATASET.root_dataset,
        cfg.DATASET.list_train,
        cfg.DATASET,
        batch_per_gpu=cfg.TRAIN.batch_size_per_gpu)

    loader_train = DataLoader(
        dataset_train,
        batch_size=1,
        shuffle=False,
        collate_fn=lambda samples: samples,
        num_workers=cfg.TRAIN.workers,
        drop_last=True,
        pin_memory=True)
    print('1 Epoch = {} iters'.format(cfg.TRAIN.epoch_iters))

    # infinite-style iterator consumed by train()
    iterator_train = iter(loader_train)

    # Optimizer: plain SGD over all model parameters
    optimizer = nn.SGD(
        segmentation_module.parameters(),
        lr=cfg.TRAIN.lr_encoder,
        momentum=cfg.TRAIN.beta1,
        weight_decay=cfg.TRAIN.weight_decay)

    # Training curves accumulated across epochs
    history = {'train': {'epoch': [], 'loss': [], 'acc': []}}

    for epoch_idx in range(cfg.TRAIN.start_epoch, cfg.TRAIN.num_epoch):
        epoch = epoch_idx + 1  # epochs are reported 1-based
        train(segmentation_module, iterator_train, optimizer, history, epoch, cfg)

        # checkpointing after every epoch
        checkpoint(segmentation_module, history, cfg, epoch)

    print('Training Done!')


if __name__ == '__main__':
    jt.flags.use_cuda = 1
    jt.flags.device_id = 6

    # plain-text training log that train() appends to (module-level global)
    log_file = "log_initial_final.txt"

    cfg.merge_from_file("config/ade20k-resnet101dilated-cca_deepsup.yaml")

    logger = setup_logger(distributed_rank=0)

    # Output directory (exist_ok avoids the isdir/makedirs race)
    os.makedirs(cfg.DIR, exist_ok=True)
    logger.info("Outputting checkpoints to: {}".format(cfg.DIR))
    with open(os.path.join(cfg.DIR, 'config.yaml'), 'w') as f:
        f.write("{}".format(cfg))

    # Start from checkpoint when resuming past epoch 0
    if cfg.TRAIN.start_epoch > 0:
        cfg.MODEL.weights_encoder = os.path.join(
            cfg.DIR, 'encoder_epoch_{}.pth'.format(cfg.TRAIN.start_epoch))
        cfg.MODEL.weights_decoder = os.path.join(
            cfg.DIR, 'decoder_epoch_{}.pth'.format(cfg.TRAIN.start_epoch))
        # NOTE(review): checkpoint() saves '{DIR}/epoch_{N}.pkl', not the
        # encoder/decoder .pth files expected here -- verify resume works.
        # Explicit raise instead of assert: asserts vanish under `python -O`.
        if not (os.path.exists(cfg.MODEL.weights_encoder)
                and os.path.exists(cfg.MODEL.weights_decoder)):
            raise FileNotFoundError("checkpoint does not exist!")

    # Single-GPU setup: total batch equals the per-GPU batch
    cfg.TRAIN.batch_size = cfg.TRAIN.batch_size_per_gpu

    cfg.TRAIN.max_iters = cfg.TRAIN.epoch_iters * cfg.TRAIN.num_epoch
    cfg.TRAIN.running_lr_encoder = cfg.TRAIN.lr_encoder
    cfg.TRAIN.running_lr_decoder = cfg.TRAIN.lr_decoder
    cfg.TRAIN.running_lr = cfg.TRAIN.lr_encoder

    random.seed(cfg.TRAIN.seed)

    main(cfg)
