# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import time
import yaml
import redis
import pickle
import threading
import subprocess
import ddl_platform.common.settings as settings

from pathlib import Path    
from queue import Queue
from random import randint

from ddl_platform.common import utils 
from ddl_platform.ddlib.job import JobStatus, JobTaskStatus
from ddl_platform.database.silence_job_table import silence_job_table
from ddl_platform.database.profiled_job_table import profiled_job_table
from ddl_platform.scheduler.message_queue import MessageQueue

from job_info import JobInfo, SUPPORT_NETS, CODES


# Module-level queue of incoming jobs (not consumed in this file's visible code).
g_job_queue = Queue()
# Maps a job object -> the sandbox GPU id it currently occupies for profiling.
# Written by launch(), cleared by ProfilingThread.run() when the run finishes.
g_profiling_job_dict = {}


class ProfilingThread(threading.Thread):
    """Run a profiling command in a subprocess and record the outcome.

    Blocks until the launched process exits, then moves the job from the
    silence table into the profiled table and releases the sandbox GPU
    slot held in ``g_profiling_job_dict``.

    Args:
        job: Job being profiled; must expose ``_job_id``.
        cmd (list): argv-style command list passed to ``subprocess.Popen``.
    """

    def __init__(self, job, cmd):
        super().__init__()
        self.job = job
        self.cmd = cmd

    def run(self):
        # argv-list form (shell=False) — no shell injection surface.
        p = subprocess.Popen(self.cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        print('cmd launched, waiting until it is finished...')
        output, error = p.communicate()

        # NOTE(review): the silence-table row is set to PROFILED even when the
        # process failed; only the in-memory object (and the profiled table)
        # gets EXCEPTION/WAITING. Looks intentional — confirm.
        silence_job_table.update_status(self.job._job_id, JobStatus.PROFILED)
        updated_job = silence_job_table.get(self.job._job_id)
        if p.returncode != 0:
            updated_job.update_status(JobStatus.EXCEPTION)
            print('-----------------job exception: ', updated_job, error)
        else:
            updated_job.update_status(JobStatus.WAITING)
            print('-----------------job finished: ', updated_job)
        profiled_job_table.insert(updated_job)
        # pop with a default so a missing key can never crash the thread.
        g_profiling_job_dict.pop(self.job, None)


def launch(job):
    """Try to start a sandbox profiling run for *job* on a free sandbox GPU.

    Writes a profiling-tweaked copy of the job's yaml config into the
    sandbox dir (profiling mode, 1 GPU, 10 steps, 1 epoch), claims a free
    GPU slot in ``g_profiling_job_dict`` and spawns a ``ProfilingThread``.

    Args:
        job: Job to profile; must expose ``_conf``, ``_code_dir``,
            ``_job_id``, ``job_yaml``, ``load_job_config`` and ``activate``.

    Returns:
        bool: True if a GPU slot was free and the job was launched,
        False if all sandbox GPUs are busy.
    """
    print('launch job: ', job, job._conf)
    codedir = job._code_dir
    job.load_job_config(job._conf)

    # Build a sandbox-local config tweaked for a short single-GPU run.
    yaml_fname = Path(job._conf).name
    sandbox_yaml = os.path.join(settings.SANDBOX_DIR, 'profiling-' + yaml_fname)
    # NOTE(review): .copy() is shallow, so assigning into the nested 'log'
    # dict below also mutates job.job_yaml['log'] — confirm this aliasing
    # is intended before switching to copy.deepcopy().
    sandbox_job_yaml = job.job_yaml.copy()
    sandbox_job_yaml['profiling'] = True
    sandbox_job_yaml['scheduling'] = False
    sandbox_job_yaml['log']['logfile'] = os.path.join(
        settings.SANDBOX_DIR,
        'profiling-' + Path(sandbox_job_yaml['log']['logfile']).name)
    sandbox_job_yaml['optimizer']['ngpus'] = 1
    sandbox_job_yaml['optimizer']['steps'] = 10
    sandbox_job_yaml['optimizer']['epochs'] = 1
    with open(sandbox_yaml, 'w') as f:
        f.write(yaml.dump(sandbox_job_yaml))

    hosts_fn = os.path.join(settings.SANDBOX_DIR, 'cluster1')

    # Pick the first sandbox GPU not already held by a profiling job.
    # Build the busy set once instead of scanning dict.values() per gpu id.
    busy_gpus = set(g_profiling_job_dict.values())
    selected_gid = next(
        (gid for gid in range(settings.SANDBOX_NGPUS) if gid not in busy_gpus),
        -1)
    if selected_gid == -1:
        return False

    g_profiling_job_dict[job] = selected_gid
    cmd = settings.MPI_PREFIX + [
        '-x', 'CUDA_VISIBLE_DEVICES=%d' % selected_gid, '-np', '1',
        '-hostfile', hosts_fn,
        os.path.join(settings.PYTHON_HOME, 'python'), '%s/main.py' % codedir,
        '--yaml', sandbox_yaml]

    print('launching cmd: ', cmd)
    job.activate()
    silence_job_table.update_status(job._job_id, JobStatus.PROFILING)
    ProfilingThread(job, cmd).start()
    return True


class JobManager:
    """Poll the silence-job table and dispatch waiting jobs to profiling.

    Args:
        host (str, optional): Host of an intermediate channel for communication, e.g., a Redis host.
        port (int, optional): Port of an intermediate channel for communication, e.g., a Redis port.
        set_name (str, optional): Name of the backing set in the channel.
    """
    def __init__(self, host='localhost', port=6379, set_name='test_set'):
        # The message-queue transport is currently disabled; host/port are
        # accepted for interface compatibility but unused.
        #self._msg_queue = MessageQueue(settings.JOB_QUEUE_STR, host, port)
        self._set_name = set_name

    def push_job_to_queue(self, job_info):
        r"""Push the job with full job info to the msg queue.

        NOTE(review): the queue put is disabled, so the pickled payload is
        currently discarded — this only exercises picklability. Confirm
        before removing.

        Args:
            job_info (JobInfo): Job details.
        """
        dumped_job = pickle.dumps(job_info)
        #self._msg_queue.put(dumped_job)

    def profiling(self, job):
        """Launch a sandbox profiling run for *job* (best effort)."""
        launch(job)

    def run(self):
        r"""Daemon loop: poll the DB once per second and start profiling
        for waiting jobs while sandbox GPU slots are free. Never returns.
        """
        last_logline = ""
        while True:
            waiting_jobs = silence_job_table.query_waiting_jobs()
            # FIFO: oldest arrival first.
            waiting_jobs.sort(key=lambda j: j._arrival_time)
            profiling_jobs = silence_job_table.query_profiling_jobs()

            logline = '# waiting: %d, # profiling: %d' % (
                len(waiting_jobs), len(profiling_jobs))
            # Only log when the counts change, to avoid spamming stdout.
            if logline != last_logline:
                last_logline = logline
                print(logline)

            free_slots = settings.SANDBOX_NGPUS - len(profiling_jobs)
            if free_slots > 0:
                # Launch at most one waiting job per free sandbox GPU.
                for sjob in waiting_jobs[:free_slots]:
                    self.profiling(sjob)
            time.sleep(1)


if __name__ == '__main__':
    # Entry point: construct the manager and enter its polling loop (blocks forever).
    job_manager = JobManager(host=settings.REDIS_HOST, port=settings.REDIS_PORT)
    job_manager.run()
