# -*- coding: utf-8 -*-
from __future__ import print_function
import torch
import torch.optim as optim
import threading
import yaml
from .logger import logger


class JobStatus:
    r"""Lifecycle states of a job in the (persistent) job database.

    Every attribute is a plain string constant whose value is stored as-is,
    so these literals must stay stable across releases.
    """
    # Pre-run states.
    WAITING = 'WAITING'
    PROFILING = 'PROFILING'
    PROFILED = 'PROFILED'
    # Scheduling / placement states.
    INPOOL = 'INPOOL'
    SCHEDULING = 'SCHEDULING'
    MIGRATING = 'MIGRATING'
    MIGRATION = 'MIGRATION'
    # Terminal states.
    FINISHED = 'FINISHED'
    EXCEPTION = 'EXCEPTION'


class JobTaskStatus:
    r"""Fine-grained task states of a job in the real-time database.

    Every attribute is a plain string constant whose value is stored as-is,
    so these literals must stay stable across releases.
    """
    NONE = 'NONE'
    # NOTE(review): 'BEGINING' is misspelled, but the value is presumably
    # persisted/compared elsewhere — do not correct without a data migration.
    BEGINING = 'BEGINING'
    # Computation phase.
    COMPUTING = 'COMPUTING'
    COMPUTATION_WAITING = 'COMPUTATION_WAITING'
    COMPUTATION_FINISHED = 'COMPUTATION_FINISHED'
    # Communication phase.
    COMMUNICATING = 'COMMUNICATING'
    COMMUNICATION_WAITING = 'COMMUNICATION_WAITING'
    COMMUNICATION_FINISHED = 'COMMUNICATION_FINISHED'
    # Exceptional / migration / terminal states.
    EXCEPTION = 'EXCEPTION'
    MIGRATING = 'MIGRATING'
    MIGRATION = 'MIGRATION'
    FINISHED = 'FINISHED'
    END = 'END'


class Job(object):
    r"""Base class of a Job.

    Subclasses must override :meth:`build_dataset`, :meth:`build_model` and
    :meth:`build_criterion`. :meth:`build_optimizer` has a default SGD
    implementation driven by the ``optimizer`` section of the config, and may
    also be overridden. All ``build_*`` products are created lazily and cached.

    Args:
        conf_yaml (str): The config file with yaml format.

    Attributes:
        _config (dict): Dict object loaded from ``conf_yaml``.
        _id (str): Job id.
        _user_id (str): User id.
        _optimizer (optim.Optimizer): Torch optimizer (lazily built).
        _model (nn.Module): Torch model (lazily built).
        _dataset (tuple): Training dataset and testing dataset (lazily built).
        _criterion (nn.Module): Torch loss function (lazily built).
        _bs (int): Current (possibly rescaled) batch size; ``None`` until
            :meth:`update_bs` is first called.
        _bs_lock (threading.Lock): Protects ``_bs`` against concurrent
            reader/writer threads.
    """
    def __init__(self, conf_yaml):
        # NOTE(review): FullLoader blocks arbitrary object construction but is
        # still not meant for untrusted input; if config files can come from
        # untrusted users, prefer yaml.safe_load.
        with open(conf_yaml, 'r') as f:
            config = yaml.load(f, Loader=yaml.FullLoader)
        self._config = config
        self._id = config['id']
        self._user_id = config['uid']
        self._optimizer = None
        self._model = None
        self._dataset = (None, None)
        self._criterion = None
        self._bs = None
        self._bs_lock = threading.Lock()

    def __str__(self):
        return self._config['name'] + '-' + str(self._config['id'])

    def get_id(self):
        r"""Return the job id."""
        return self._id

    def update_bs(self, bs):
        r"""Atomically set the current batch size.

        Args:
            bs (int): New batch size.
        """
        # Context manager guarantees the lock is released even on error.
        with self._bs_lock:
            self._bs = bs

    def get_current_bs(self):
        r"""Get the current batch size.

        Returns:
            int: The last value passed to :meth:`update_bs`, or the
            ``dataset.batch_size`` entry of the config if none was set yet.
        """
        with self._bs_lock:
            bs = self._bs
        if bs is not None:
            return bs
        return self.config()['dataset']['batch_size']

    def get_original_bs(self):
        r"""Return the batch size declared in the config file."""
        return self.config()['dataset']['batch_size']

    def config(self):
        r"""Get the config object.
        Returns
            config (dict): Configuration.
        """
        return self._config

    def optimizer(self):
        r"""Get optimizer, building and caching it on first use.
        Returns
            optimizer (optim.Optimizer): Optimizer.
        """
        # Explicit None check: an optimizer object's truthiness is not a
        # reliable "already built" signal.
        if self._optimizer is None:
            self._optimizer = self.build_optimizer()
        return self._optimizer

    def dataset(self):
        r"""Get dataset (train and test), building and caching on first use.
        Returns
            dataset (tuple): train and test datasets.
        """
        if self._dataset[0] is None:
            self._dataset = self.build_dataset()
        return self._dataset

    def model(self):
        r"""Get model, building and caching it on first use.
        Returns
            model (nn.Module): Torch model.
        """
        if self._model is None:
            self._model = self.build_model()
        return self._model

    def criterion(self):
        r"""Get loss function, building and caching it on first use.
        Returns
            loss_func (nn.Module): Torch loss function.
        """
        if self._criterion is None:
            self._criterion = self.build_criterion()
        return self._criterion

    def build_optimizer(self):
        r"""Create the optimizer (default: SGD from the ``optimizer`` config).

        Returns:
            optimizer (optim.Optimizer).
        """
        model = self.model()
        config = self.config()['optimizer']
        optimizer = optim.SGD(model.parameters(),
                lr=config['lr'],
                momentum=config['momentum'])
        return optimizer

    def build_dataset(self):
        r"""Create the dataset. Must be overridden by subclasses.

        Returns:
            dataset (tuple): train and test datasets.
        """
        logger.error('Please override the function: build_dataset')
        raise NotImplementedError

    def build_model(self):
        r"""Create the model. Must be overridden by subclasses.

        Returns:
            model (nn.Module): Torch model.
        """
        logger.error('Please override the function: build_model')
        raise NotImplementedError

    def build_criterion(self):
        r"""Create the loss function. Must be overridden by subclasses.

        Returns:
            loss_func (nn.Module): Torch loss function.
        """
        logger.error('Please override the function: build_criterion')
        raise NotImplementedError

    def training_step(self, batch, model):
        r"""Train one step (forward pass + loss; no backward here).

        Args:
            batch (obj): User customized input (one mini-batch).
            model (nn.Module): Torch model.

        Returns:
            loss (Tensor): Loss value of feed-forward.
            outputs (Tensor): Outputs of feed-forward.
        """
        x, y = batch
        criterion = self.criterion()
        outputs = model(x)
        loss = criterion(outputs, y)
        return loss, outputs

    def cal_eval_performance(self, batch_outputs, batch_inputs):
        r"""Calculate the evaluation performance (top-k accuracy).

        Args:
            batch_outputs (Tensor): Outputs from ``training_step``
                (per-class scores, shape ``(batch, num_classes)``).
            batch_inputs (tuple): User customized input (one mini-batch);
                the second element must be the label tensor.

        Returns:
            accuracies (list): Top-k accuracy (fractions in [0, 1]),
            one entry per k in the ``evaluation.topk`` config (default (1,)).
        """
        outputs = batch_outputs
        _, labels = batch_inputs
        config = self.config()['evaluation']
        topk = config.get('topk', (1,))
        with torch.no_grad():
            maxk = max(topk)
            batch_size = labels.size(0)
            _, pred = outputs.topk(maxk, 1, True, True)
            pred = pred.t()
            correct = pred.eq(labels.view(1, -1).expand_as(pred))
            res = []
            for k in topk:
                # reshape (not view): correct[:k] is a slice of a transposed,
                # hence non-contiguous, tensor and view(-1) would raise.
                correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
                res.append(correct_k.mul_(1.0 / batch_size))
            return res