# -*- coding: utf-8 -*-
r"""Trainer: runs a training job on one or more GPUs (Horovod for multi-GPU),
with optional multi-job scheduling control, profiling, wandb logging and
checkpointing.
"""
from __future__ import print_function
import os
import sys
import time
import torch
import threading
import wandb
import torch.cuda as ct

from pathlib import Path
from queue import Queue
from ddl_platform.common import settings
import horovod.torch as hvd
#from . import dopt as hvd
from . import utils
from .logger import logger, add_log_file
from .communicator import Communicator
from .job import JobTaskStatus
from .compression import compressors

from ddl_platform.database.scheduling_job_table import scheduling_job_table


#os.environ['HOROVOD_NUM_NCCL_STREAMS'] = '1' 
#os.environ['HOROVOD_FUSION_THRESHOLD'] = '0'
#os.environ['HOROVOD_CACHE_CAPACITY'] = '0'
#os.environ['HOROVOD_CYCLE_TIME'] = '0'
#os.environ['HOROVOD_STALL_CHECK_TIME_SECONDS'] = '3600'


class Trainer:
    r"""The trainer recieves a ``job`` object as input, and ``comm_host``,
        ``comm_port`` for communication.
    Args:
        job (Job): An initialized job object.
        comm_host (str): The host for messaging, e.g., a redis server ip address.
        comm_port (int): The port for messaging, e.g., a redis server port.

    Attributes:
        _job (Job): The initialized job object.
        _nworkers (int): The number of workers (i.e., GPUs) for running the job.
        _cuda (bool): Indicate whether to use GPU, and it is always True.
        _rank (int): The rank in the distributed setting.
        _comm (Communicator): If ``comm_host`` and ``comm_port`` can be accessed properly, then
            a ``Communicator`` object is generated for messaging.
    """

    def __init__(self, job, comm_host=None, comm_port=None):
        self._job = job
        config = self._job.config()

        optimizer_config = config['optimizer']
        self._nworkers = optimizer_config['ngpus']
        self._cuda = True
        self._rank = 0 # rank should be updated according to ngpus
        self._isprofiling = config.get('profiling', False)

        if self._nworkers > 1:
            try:
                hvd.size()
            except:
                hvd.init()

        logfile = config['log'].get('logfile', 'job.log')
        dirname = os.path.dirname(logfile)
        logfile = os.path.join(dirname, 'job-%s-' % str(self._job.get_id()) + Path(logfile).name)
        print('========logfiel: ', logfile)
        add_log_file(logfile)


        self._user_id = config['uid']
        self._id = config['id']
        self._multi_job_scheduling = config.get('scheduling', False)

        gpus_fn = '%s/%s_%s_gpus' % (settings.CONFIG_FOLDER, self._user_id, self._id)
        logger.info('----------> gpus_fn: %s', gpus_fn)
        gpu_ids = []

        if os.path.isfile(gpus_fn):
            with open(gpus_fn, 'r') as f:
                gpu_ids = f.readlines()
                gpu_ids = [int(gid.strip()) for gid in gpu_ids]
        else:
            gpu_ids = [i % 4 for i in range(self._nworkers)]

        if self._nworkers > 1:
            self._rank = hvd.rank()
            if self._cuda:
                torch.cuda.set_device(gpu_ids[self._rank])

        if self._rank == 0:
            logger.info('gpus_fn: %s', gpus_fn)

        if self._rank == 0:
            logger.info("GPU IDs: %s, nworkers: %d." % (gpu_ids, self._nworkers))

        wandbconfig = config['log'].get('wandb', None)
        if self._rank == 0 and wandbconfig and wandbconfig['enabled']:
            self._wandb = True
            wandb.init(project=wandbconfig['project'], entity=wandbconfig['entity'], name=logfile, config=config)
        else:
            self._wandb = False

        self._previous_bs = config['dataset']['batch_size']
        self._train_iter = 0
        self._train_epoch = 0
        try:
            self._comm = Communicator(rank=self._rank, host=comm_host, port=comm_port)
        except:
            logger.exception('Communicator connect error!')
        self._running = True
        self._current_running_start_time = None
        self._control_cmds = None #{'_timestamp': time.time(), 'interval': 
        self._new_cmd_timestamp = time.time()
        self._dynamic_occupied_gpu_mem = -1 #self._job.get_occuppied_gpu_mem()
        self._msg_queue = Queue()
        self._msg_thread = threading.Thread(target=self.message_thread_run)  if self._multi_job_scheduling else None

    def message_thread_run(self):
        interval = time.time()
        while self._running:
            try:
                msg = self.wait_for_signal()
                if msg is not None:
                    # Receive a new msg
                    if self._control_cmds is None or self._control_cmds['_timestamp'] != msg['_timestamp']: 
                        #logger.info('[Rank: %d] updated control_cmds: %s', self._rank, msg)
                        self._control_cmds = msg
                        self._new_cmd_timestamp = time.time()
                        if self._multi_job_scheduling:
                            self.adjust_batch_size(int(msg['updated_bs']))
                if time.time() - interval > 1 and self._train_iter > 0:
                    self._dynamic_occupied_gpu_mem = utils.get_occuppied_gpu_mem()
                    interval = time.time()
                time.sleep(0.001)
            except:
                logger.exception('[Rank: %d] message running thread error!', self._rank)
                self._running = False
                break


    def config_dataloader(self):
        """Configure the dataloaders (training and testing).
        Initilize the ``trainloader (DataLoader)`` and ``testloader (DataLoader)``
        from the settings in ``_job``.
        """
        config = self._job.config()['dataset']
        batch_size = self._job.get_original_bs()
        num_cpu_threads = config.get('num_cpu_threads', 1)
        train_dataset, test_dataset = self._job.dataset()

        nworkers = self._nworkers
        train_sampler = None
        shuffle = True
        if nworkers > 1:
            train_sampler = torch.utils.data.distributed.DistributedSampler(
                train_dataset, num_replicas=nworkers, rank=self._rank)
            train_sampler.set_epoch(0)
            shuffle = False
            self.train_sampler = train_sampler
        else:
            self.train_sampler = None

       
        if self._job.__dict__.get('_collate_fn', None) is None:
            trainloader = torch.utils.data.DataLoader(train_dataset,
                                                      batch_size=batch_size,
                                                      shuffle=shuffle,
                                                      num_workers=num_cpu_threads,
                                                      sampler=train_sampler,
                                                      drop_last=True)
        else:
            trainloader = torch.utils.data.DataLoader(train_dataset,
                                                      batch_size=batch_size,
                                                      shuffle=shuffle,
                                                      num_workers=num_cpu_threads,
                                                      sampler=train_sampler,
                                                      collate_fn=self._job._collate_fn,
                                                      drop_last=True)

        eval_conf = self._job.config()['evaluation']

        #TODO: change to eval with multiple workers 
        if False and nworkers > 1:
            test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset, num_replicas=nworkers, rank=self._rank)
        else:
            test_sampler=None
        if self._job.__dict__.get('_collate_fn', None) is None:
            testloader = torch.utils.data.DataLoader(
                test_dataset,
                batch_size=eval_conf['batch_size'],
                sampler=test_sampler,
                shuffle=False, num_workers=2)
        else:
            testloader = torch.utils.data.DataLoader(
                test_dataset,
                batch_size=eval_conf['batch_size'],
                sampler=test_sampler,
                collate_fn=self._job._collate_fn,
                shuffle=False, num_workers=2)
        return trainloader, testloader

    def wait_for_signal(self):
        r"""Receive signal from ``Scheduler``.

        Returns:
            True if receiving a True signal from the ``Scheduler``,
                otherwise continue waiting.
        """
        msg = self._comm.sub_info_from_scheduler()
        return msg

    def train_one_step(self, batch, model, optimizer, iteration, accumulated=False):
        r"""Train a single step with current data: ``batch``.

        Args:
            batch (tuple): Customized input data.
            model (nn.Module): PyTorch module.
            optimizer (torch.optim.Optimizer): Pytorch optimizer.
        Returns:
            loss (float): Loss of current step.
            time_meta (tuple): Meta information of the elapsed time of different phases,
                ``pcie_time, forward_time, backward_time, computing_time, communication_time``.
        """
        pcie_start = time.time()
        if self._cuda:
            batch = utils.cuda(batch)
        pcie_time = time.time() - pcie_start

        computing_start = time.time()
        wait_comp_time = 0
        log_step = 1000
        if self._multi_job_scheduling and self._rank == 0:
            if iteration == 0:
                self._comm.update_job_record(JobTaskStatus.BEGINING)
            wait_comp_time = time.time() - computing_start

        forward_start = time.time()
        loss, _ = self._job.training_step(batch, model)
        forward_time = time.time() - forward_start

        backward_start = time.time()
        loss.backward()
        #current_stream = torch.cuda.current_stream()
        #current_stream.synchronize()
        backward_time = time.time() - backward_start 
        computing_time = time.time() - computing_start

        communication_start = time.time()
        wait_comm_time = 0

        if self._nworkers > 1 and self._multi_job_scheduling:
            wait_comm_time = time.time() - communication_start
        #if self._rank == 0 and iteration % log_step == 0:
        #    logger.info('Starting to communicate (all-reduce) at timestamp %s', time.time())
        if not accumulated:
            optimizer.step()
        #current_stream = torch.cuda.current_stream()
        #current_stream.synchronize()
        communication_time = time.time() - communication_start

        return loss, (pcie_time, forward_time, backward_time, computing_time, communication_time, wait_comp_time, wait_comm_time)

    def evaluation(self, epoch, test_dataloader, model):
        r"""Evaluation with test data.

        Args:
            epoch (int): Current evaluation epoch.
            test_dataloader (Dataloader): Dataloader for testing.
            model (nn.Module): PyTorch module.

        Returns:
            eval_perfs (list):
        """
        model.eval()
        eval_perfs = []
        epoch_eval_loss = 0.
        t_eval = 0
        for eval_batch in test_dataloader:
            if self._cuda:
                eval_batch = utils.cuda(eval_batch)
            with torch.no_grad():
                eval_loss, eval_outputs = self._job.training_step(eval_batch, model)
            epoch_eval_loss += eval_loss
            t_eval += 1
            eval_perf = self._job.cal_eval_performance(eval_outputs, eval_batch)
            if len(eval_perfs) == 0:
                eval_perfs = [0] * len(eval_perf)
            for i, eval_perf in enumerate(eval_perf):
                eval_perfs[i] += eval_perf
        eval_perfs = [float(e/t_eval) for e in eval_perfs]
        eval_perf_str = ','.join([str(e) for e in eval_perfs])
        if self._rank == 0:
            logger.info('Epoch [%d] average eval loss: %f, eval perf: %s' %
                        (epoch+1, epoch_eval_loss/t_eval, eval_perf_str))
        if self._wandb:
            wandb.log({"val loss": epoch_eval_loss/t_eval, "epoch": epoch, 'val acc': eval_perfs[0]})

        model.train()
        return eval_perfs

    def adjust_lr(self, iteration, epoch, optimizer):
        progress = epoch
        config = self._job.config()
        base_lr = config['optimizer']['lr']
        warmup = config['optimizer'].get('warmup', 0)
        lr_scheduler = config['optimizer'].get('lr_scheduler', [])
        train_dataloader = self._train_dataloader
        num_iters_per_epoch = len(train_dataloader)
        if progress < warmup:
            warmup_total_iters = num_iters_per_epoch * warmup
            min_lr = base_lr / warmup_total_iters #self._nworkers
            lr_interval = (base_lr - min_lr) / warmup_total_iters
            lr = min_lr + lr_interval * self._train_iter
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
            return lr
        decay = 1
        for epoch in lr_scheduler:
            if progress > epoch:
                decay *= 0.1
        lr = decay * base_lr
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        return lr

    def adjust_batch_size(self, bs):
        current_bs = self._job.get_current_bs()
        if current_bs == bs:
            return
        self._job.update_bs(bs)
        logger.info('[Rank: %d] Job: %s, Adjusting the mini-batch size to: %d', self._rank, self._job, bs)

    def upload_running_time(self):
        if self._current_running_start_time is not None:

            if self._rank == 0:
                self._comm.accumulate_running_time(time.time()-self._current_running_start_time)
            self._current_running_start_time = None

    def wait(self):
        if not self._multi_job_scheduling:
            return True
        while self._running:
            if self._control_cmds is None \
                or self._control_cmds['interval'] == 0 \
                or time.time() - self._new_cmd_timestamp > self._control_cmds['interval']:
                torch.cuda.empty_cache()
                self.upload_running_time()
                time.sleep(0.1)
            elif self._control_cmds.get('migration', False):
                self.save_checkpoint()
                self.upload_running_time()
                return False
            else:
                if self._current_running_start_time is None:
                    self._current_running_start_time = time.time()
                return True

    def profiling(self):
        self._running = False
        train_dataloader, test_dataloader = self.config_dataloader()
        config = self._job.config()
        batch_size = config['dataset']['batch_size']
        base_bs = batch_size
        profiled_bses = [base_bs]
        pbs = base_bs
        while pbs//2 >= 1:
            profiled_bses.append(pbs//2)
            pbs //= 2
        for bs in profiled_bses:
            self.adjust_batch_size(bs)
            torch.cuda.empty_cache()
            try:
                self._train_epoch = 0
                self._train_iter = 0
                iter_time = self.safe_fit()
                occupied_gpu_mem = utils.get_occuppied_gpu_mem()
            except:
                logger.exception('[Rank: %d] Exception', self._rank)
                occupied_gpu_mem = -1
                iter_time = -1
            self._comm.update_profiling_job_info(bs, occupied_gpu_mem, iter_time/(batch_size//bs))

    def save_checkpoint(self):
        if self._rank == 0:
            epoch = self._train_epoch
            iteration = self._train_iter
            config = self._job.config()
            folder = config['checkpoint'].get('folder', 'checkpoints')
            if not os.path.isdir(folder):
                os.mkdir(folder)
            state = {'iter': iteration, 'epoch': epoch, 'state': self._job.model().state_dict()}
            filename = 'model-epoch%d-iter%d' % (epoch, iteration)
            filename += '.pth'
            full_fn = os.path.join(folder, filename)
            torch.save(state, full_fn) 
            if self._multi_job_scheduling:
                logger.info('updated to migration')
                self._comm.update_job_record(JobTaskStatus.MIGRATION, checkpoint_path=full_fn)

    def fit(self):
        try:
            if self._isprofiling:
                self.profiling()
            else:
                self.safe_fit()
        except:
            self._running = False
            logger.exception("Error at rank %d occurred in fit: %s", self._rank, self._job)
            if self._multi_job_scheduling and self._rank == 0:
                self._comm.update_job_record(JobTaskStatus.EXCEPTION)
                #self._comm.pub_info_to_resource_manager()

    def safe_fit(self):
        """Train the model with user configurations.
        """
        start_time = time.time()
        config = self._job.config()

        self._comm.create_job_record(self._job, self._isprofiling)
        if self._nworkers > 1:
            hvd.barrier()

        if self._msg_thread:
            self._msg_thread.start()
        if self._rank == 0:
            logger.info('Job: %s begins', str(self._job))

        self.wait()

        if self._rank == 0:
            logger.info('Training with config: %s', config)
        multi_job_scheduling = config.get('scheduling', False)
        train_dataloader, test_dataloader = self.config_dataloader()

        if self._nworkers > 1:
            hvd.barrier()

        self._train_dataloader = train_dataloader
        self._test_dataloader = test_dataloader 

        model = self._job.model()
        checkpoint_path = config.get('checkpoint_path', None)
        if not self._isprofiling and checkpoint_path and os.path.isfile(checkpoint_path):
            logger.info('[Rank %d] Loading check point: %s', self._rank, checkpoint_path)
            checkpoint = torch.load(checkpoint_path)
            model.load_state_dict(checkpoint['state'])
            self._train_iter = checkpoint['iter']
            self._train_epoch = checkpoint['epoch']


        criterion = self._job.criterion()
        if self._cuda:
            model.cuda()
            criterion = criterion.cuda()

        optimizer = self._job.optimizer()

        if self._nworkers > 1:
            compressor = config['optimizer'].get('compressor', 'none')
            density = config['optimizer'].get('density', 1)
            threshold = config['optimizer'].get('threshold', -1)
            momentum_correction = config['optimizer'].get('momentum_correction', False)
            if compressor == 'none' and density < 1:
                logger.warning('optimizer.density=%f will be ignored as compressor=%s' % (density, compressor))
                density = 1
            # optimizer = hvd.DistributedOptimizer(
            #     optimizer,
            #     named_parameters=model.named_parameters(),
            #     compression=compressors[compressor](),
            #     is_sparse=density<1,
            #     density=density,
            #     seq_layernames=None,
            #     layerwise_times=None,
            #     norm_clip=None,
            #     threshold=threshold,
            #     writer=None,
            #     gradient_path='./',
            #     momentum_correction=momentum_correction,
            #     fp16=False,
            #     mgwfbp=False,
            #     rdma=False,
            #     multi_job_scheduling=multi_job_scheduling)
            optimizer = hvd.DistributedOptimizer(
                optimizer,
                named_parameters=model.named_parameters(),
                compression=compressors[compressor](),
                )
            hvd.broadcast_parameters(model.state_dict(), root_rank=0)
            #hvd.broadcast_optimizer_state(optimizer, root_rank=0)

        epochs = config['optimizer']['epochs']
        steps = config['optimizer'].get('steps', 0)
        display = config['optimizer'].get('display', 40)
        evaluation = config['evaluation'].get('enabled', False)
        checkpoint = config['checkpoint'].get('enabled', False)

        iteration = self._train_iter
        best_eval_perf = 0
        best_train_loss = 1e10
        is_best = False
        metric = config['checkpoint'].get('metric', 'eval')
        batch_size = config['dataset']['batch_size']

        avg_iter_time = 0
        iter_time_start = time.time()
        train_start_time = time.time()
        num_current_iters = 0
        cont = True
        for epoch in range(self._train_epoch, epochs):
            if self._nworkers > 1:
                optimizer.train_epoch = epoch
            if self.train_sampler:
                self.train_sampler.set_epoch(epoch)

            current_lr = self.adjust_lr(self._train_iter, epoch, optimizer)
            epoch_loss = 0.0
            num_iters_per_epoch = 0
            stime = time.time()
            computing_time_sum = 0
            communication_time_sum = 0
            wait_comp_time_sum = 0
            wait_comm_time_sum = 0
            for batch_idx, batch in enumerate(self._train_dataloader):
                cont = self.wait()
                if not cont:
                    break

                current_bs = self._job.get_current_bs()

                if current_bs != self._previous_bs:
                    torch.cuda.empty_cache()
                    self._previous_bs = current_bs

                n_accumulate = batch_size // current_bs
                optimizer.zero_grad()
                train_loss = 0.0
                for j in range(n_accumulate):
                    if self._nworkers > 1:
                        optimizer.local = j < n_accumulate - 1
                    if 'get_sub_batch' in dir(self._job):
                    #if hasattr(self._job, 'get_sub_batch'):
                        sub_batch = self._job.get_sub_batch(batch, j, current_bs)
                    else:
                        sub_batch = [x[j*current_bs:(j+1)*current_bs] for x in batch]
                    loss, meta_time = self.train_one_step(tuple(sub_batch), model, optimizer, iteration, accumulated=(j<n_accumulate-1))
                    train_loss += loss
                pcie_time, forward_time, backward_time, computing_time, communication_time, wait_comp_time, wait_comm_time = meta_time

                if self._multi_job_scheduling and self._rank == 0:
                    self._comm.update_job_info(JobTaskStatus.COMPUTING,
                            iteration,
                            current_bs,
                            self._dynamic_occupied_gpu_mem,
                            current_bs / (computing_time+communication_time_sum),
                            forward_time,
                            backward_time,
                            communication_time_sum)
                    self.upload_running_time()

                computing_time_sum += computing_time
                communication_time_sum += communication_time
                wait_comp_time_sum += wait_comp_time
                wait_comm_time_sum += wait_comm_time
                epoch_loss = epoch_loss + train_loss/n_accumulate
                num_iters_per_epoch += 1
                self._train_iter += 1
                iteration += 1
                num_current_iters += 1
                if iteration % display == 0:
                    if self._rank == 0:
                        avg_iter_time = (time.time() - iter_time_start) / display
                        iter_time_start = time.time()
                        logger.info('Epoch [%d/%d] - Iter [%d]: loss: %f, avg_iter_time: %.3fs, throughput: %.3fsamples/s' %
                                    (epoch, epochs, iteration, train_loss.item()/n_accumulate, avg_iter_time, batch_size/avg_iter_time))
                    if self._wandb:
                        wandb.log({"train loss": train_loss.item()/n_accumulate, "epoch": epoch, 'iteration': iteration})

                if (steps != 0) and (iteration > steps):
                    break
                if not cont:
                    break
            if not cont:
                break
            self._train_epoch += 1
            avg_loss_per_epoch = epoch_loss / num_iters_per_epoch
            if self._rank == 0:
                logger.info('Epoch [%d/%d] epoch time: %.5f s, computing_time: %.5fs(wait: %.5fs), communication_time: %.5fs(wait: %.5fs), average train loss: %f, current_lr: %f' %
                            (epoch+1, epochs, time.time()-stime, computing_time_sum/iteration, wait_comp_time_sum/iteration, communication_time_sum/iteration, wait_comm_time_sum/iteration, avg_loss_per_epoch, current_lr))
                mbytes = 1024.*1024
                logger.info('Epoch [%d/%d] occupied GPU memory: %d MBytes, max_memory_allocated: %d MBytes, memory_cached: %d MBytes, max_memory_cached: %d MBytes, real_occupied_mem: %d MBytes', epoch+1, epochs, ct.memory_allocated()/mbytes, ct.max_memory_allocated()/mbytes, ct.memory_reserved()/mbytes, ct.max_memory_reserved()/mbytes, self._dynamic_occupied_gpu_mem/mbytes)

            if best_train_loss > avg_loss_per_epoch:
                best_train_loss = avg_loss_per_epoch
                if metric == 'train':
                    is_best = True

            if evaluation and self._rank == 0:
                eval_perfs = self.evaluation(epoch, test_dataloader, model)
                if eval_perfs[0] > best_eval_perf:
                    best_eval_perf = eval_perfs[0]
                    if metric == 'eval':
                        is_best = True

            if checkpoint and self._rank == 0:
                folder = config['checkpoint'].get('folder', 'checkpoints')
                frequency = config['checkpoint'].get('frequency', 0)
                if not os.path.isdir(folder):
                    os.mkdir(folder)
                state = {'iter': iteration, 'epoch': epoch, 'state': model.state_dict()}
                if frequency > 0 and epoch % frequency == 0:
                    filename = 'model-epoch%d-iter%d-loss%.5f' % \
                                (epoch, iteration, avg_loss_per_epoch)
                    if evaluation:
                        filename = 'model-epoch%d-iter%d-loss%.5f-eval%.5f' % \
                                    (epoch, iteration, avg_loss_per_epoch, eval_perfs[0])
                    filename += '.pth'
                    torch.save(state, os.path.join(folder, filename))
                if is_best:
                    filename = 'bestmodel.pth'
                    torch.save(state, os.path.join(folder, filename))
            
            # control the running steps
            if (steps != 0) and (iteration > steps):
                break
        self._running = False
        logger.info('Rank: %d training finished, time used: %s', self._rank, time.time() - start_time)
        if self._multi_job_scheduling and self._rank == 0:
            if cont:
                self._comm.update_job_record(JobTaskStatus.FINISHED)
        return (time.time() - train_start_time)/num_current_iters
