# -*- coding: utf-8 -*-
from __future__ import print_function
import yaml
import os
from datetime import datetime
from ddl_platform.ddlib.job import JobStatus, JobTaskStatus
from ddl_platform.common.settings import *

# Networks the platform ships example training code for.
SUPPORT_NETS = ['resnet20', 'fcn5', 'resnet50', 'vgg16']

# Map each supported network to its example code directory under ROOT_DIR.
# (Line-continuation backslashes are redundant inside a bracketed literal
# and have been removed.)
CODES = {
    'resnet20': '%s/examples/cifar10_resnet20' % ROOT_DIR,
    'fcn5': '%s/examples/mnist_fcn5' % ROOT_DIR,
    'resnet50': '%s/examples/imagenet_resnet50' % ROOT_DIR,
    'vgg16': '%s/examples/imagenet_vgg16' % ROOT_DIR,
}

# Module-level defaults for a freshly created job; `progress` is read by
# JobInfo.load_job_config as the initial progress value.
status = JobStatus.WAITING
progress = 0


class JobInfo:
    """Runtime bookkeeping record for one training job.

    Parses the job's YAML configuration at construction time and then
    tracks its scheduling status, per-batch-size GPU-memory and
    iteration-time measurements, accumulated running time, and the
    node/GPU placement assigned by the scheduler.
    """

    def __init__(self, codedir, conf):
        """Create a job record and immediately load its YAML config.

        codedir -- directory containing the job's code and config file
        conf    -- config file name (YAML), joined onto ``codedir``
        """
        self._job_id = -1
        # Per-iteration timing components (seconds); -1 means "not measured".
        self._forward_time = -1
        self._backward_time = -1
        self._communication_time = -1
        self._code_dir = codedir
        self._conf = conf
        # Sample the clock once so arrival and updated timestamps are
        # identical at creation (two separate now() calls could differ).
        now = datetime.now().timestamp()
        self._arrival_time = now
        self._updated_time = now
        self._completion_time = -1
        self._status = JobStatus.WAITING
        self._task_status = JobTaskStatus.NONE

        # GPU memory consumption: model footprint plus per-sample batch cost.
        # Units appear to be MB (the commented formula in load_job_config
        # scales by 2**20) — TODO confirm against the profiler.
        self._train_mem = 0.0
        self._model_mem = 0.0
        self._batch_mem = 0.0
        self._batch_size = 0
        self._updated_bs = 0        # batch size after any scheduler adjustment
        self._throughput = 0
        self._progress = 0          # iterations/epochs completed so far
        self._iteration_time = 0
        self._tcomp = 0             # computation-time component
        self._tcomm = 0             # communication-time component
        self._bs_mems = {}              # batch size -> measured GPU memory
        self._bs_iteration_times = {}   # batch size -> measured iteration time
        self._exception = 'NONE'
        self._checkpoint_path = ''
        self._accumulate_running_time = 0

        self.load_job_config(conf)

        # Placement assigned by the scheduler (parallel lists: one node id
        # per gpu id, filled by add_gpu).
        self._node_ids = []
        self._gpu_ids = []

    def __str__(self):
        return 'job: %s <_status: %s, arrive at: %s, gpu_mem:%s, iter_time:%s, bs:%d/%d, ngpus:%d, running_time: %.3f>' % \
                (str(self._job_id), self._status, str(self._arrival_time), self._bs_mems, self._bs_iteration_times, self.get_updated_bs(), self.get_bs(), self._ngpus, self._accumulate_running_time)

    def __repr__(self):
        return str(self)

    def load_job_config(self, conf):
        """Parse the job's YAML config and populate id/batch/epoch fields.

        Note: reads ``self._conf`` rather than the ``conf`` argument, so
        the parameter is effectively unused.  IOError/OSError on a missing
        file and yaml.YAMLError on malformed content propagate to the
        caller.
        """
        file_yaml = os.path.join(self._code_dir, self._conf)
        with open(file_yaml, 'r') as f:
            self.job_yaml = yaml.safe_load(f)

        self._job_id = self.job_yaml["id"]
        self._user_id = self.job_yaml["uid"]
        self._ngpus = self.job_yaml["optimizer"]["ngpus"]
        self._worker_statuses = [JobTaskStatus.NONE] * self._ngpus
        self._batch_size = self.job_yaml["dataset"]["batch_size"]
        self._updated_bs = self._batch_size

        # Module-level `progress` (0): a freshly loaded job starts from zero.
        self._progress = progress
        self._epochs = self.job_yaml["optimizer"]["epochs"]
        self._steps = self.job_yaml["optimizer"].get("steps", 0)

        # Optional pre-profiled memory figures from the config file.
        if 'performance' in self.job_yaml:
            if 'model_mem' in self.job_yaml["performance"]:
                self._model_mem = self.job_yaml["performance"]["model_mem"]
            if 'batch_mem' in self.job_yaml["performance"]:
                self._batch_mem = self.job_yaml["performance"]["batch_mem"]
        # NOTE(review): the total-memory estimate
        # (model_mem + batch_mem * batch_size) * 2**20 is intentionally
        # disabled; _train_mem keeps its default of 0.0.
        #self._train_mem = (self._model_mem + self._batch_mem * self._batch_size) * (2**20)

    def get_id(self):
        """Return the job id from the config (or -1 before loading)."""
        return self._job_id

    def get_ngpus(self):
        """Return the number of GPUs requested by the job config."""
        return self._ngpus

    def get_config_file(self):
        """Return the full path of the job's YAML config file."""
        return os.path.join(self._code_dir, self._conf)

    def get_updated_time(self):
        """Return the timestamp of the last status update."""
        return self._updated_time

    def get_avail_bs_mems(self):
        """Return the batch-size -> GPU-memory measurement map."""
        return self._bs_mems

    def get_avail_bs_times(self):
        """Return the batch-size -> iteration-time measurement map."""
        return self._bs_iteration_times

    def set_avail_bs_mems(self, bs_mems):
        """Replace the batch-size -> GPU-memory measurement map."""
        self._bs_mems = bs_mems

    def get_bs(self):
        """Return the batch size declared in the job config."""
        return self._batch_size

    def get_updated_bs(self):
        """Return the (possibly scheduler-adjusted) batch size."""
        return self._updated_bs

    def is_waiting(self):
        """True while the job has not yet been picked up for scheduling."""
        return self._status == JobStatus.WAITING

    def activate(self):
        """Move the job from WAITING into the SCHEDULING state."""
        self._status = JobStatus.SCHEDULING

    def get_status(self):
        """Return the job-level status."""
        return self._status

    def get_task_status(self):
        """Return the task-level status."""
        return self._task_status

    def update_status(self, status):
        """Set the job-level status."""
        self._status = status

    def update_progress(self, progress):
        """Set the job's progress counter."""
        self._progress = progress

    def get_occupied_mem(self):
        """Measured GPU memory for the current batch size (0 if unknown)."""
        return self._bs_mems.get(self._batch_size, 0)

    def update_occupied_mem(self, memory):
        """Record measured GPU memory for the current batch size."""
        self._bs_mems[self._batch_size] = memory

    def get_occupied_mem_with_bs(self, bs):
        """Measured GPU memory for batch size ``bs`` (0 if unknown)."""
        return self._bs_mems.get(bs, 0)

    def update_occupied_mem_with_bs(self, bs, memory):
        """Record measured GPU memory for batch size ``bs``."""
        self._bs_mems[bs] = memory

    def update_iteration_time_with_bs(self, bs, iteration_time):
        """Record the measured iteration time for batch size ``bs``."""
        self._bs_iteration_times[bs] = iteration_time

    def update_time_info(self, bs, forward_time, backward_time, communication_time):
        """Store per-phase timings and derive the total iteration time for ``bs``."""
        self._forward_time = forward_time
        self._backward_time = backward_time
        self._communication_time = communication_time
        self._bs_iteration_times[bs] = self._forward_time + self._backward_time + self._communication_time

    def get_iteration_time(self):
        """Iteration time for the current batch size (-1 if not measured)."""
        return self._bs_iteration_times.get(self._batch_size, -1)

    def get_iteration_time_with_bs(self, bs):
        """Iteration time for batch size ``bs`` (-1 if not measured)."""
        return self._bs_iteration_times.get(bs, -1)

    def get_left_time(self):
        """Estimate remaining run time as iteration_time * remaining steps.

        NOTE(review): when no iteration time has been measured yet the -1
        sentinel makes this estimate negative — callers apparently tolerate
        that; confirm before relying on the sign.
        """
        iter_time = self.get_iteration_time()
        left_iters = self._steps - self._progress
        return iter_time * left_iters

    def add_gpu(self, gpu):
        """Append a GPU (and its host node) to the job's placement."""
        self._gpu_ids.append(gpu.get_gpu_id())
        self._node_ids.append(gpu.get_node_id())

    def get_node_ids(self):
        """Return the node ids of the assigned GPUs (parallel to gpu ids)."""
        return self._node_ids

    def get_gpu_ids(self):
        """Return the ids of the GPUs assigned to this job."""
        return self._gpu_ids

    def add_running_time(self, running_time):
        """Accumulate ``running_time`` onto the job's total running time."""
        self._accumulate_running_time += running_time

    def get_accumulate_running_time(self):
        """Return the total accumulated running time."""
        return self._accumulate_running_time