# -*- coding: utf-8 -*-
from __future__ import print_function

import os
import yaml
import time
import datetime
import pickle
import subprocess
import threading
import ddl_platform.common.settings as settings
import ddl_platform.common.utils as utils

from ddl_platform.database.silence_job_table import silence_job_table
from ddl_platform.database.profiled_job_table import profiled_job_table
from ddl_platform.database.scheduling_job_table import scheduling_job_table
from message_queue import MessageQueue
from cluster import Cluster
from job_info import JobInfo, SUPPORT_NETS, CODES
from ddl_platform.ddlib.job import JobStatus, JobTaskStatus
from random import sample
from queue import Queue

# Guards shared cluster/GPU bookkeeping across scheduler threads.
resource_lock = threading.Lock()
# Guards access to the shared job pool below.
pool_lock = threading.Lock()
# FIFO pool of profiled jobs waiting for resource allocation.
job_pool = Queue()

# Global view of the cluster's nodes and GPUs.
clust = Cluster()
# Redis-backed message queue handle (constructed here; not referenced
# elsewhere in this file — presumably consumed by another component).
msg_queue = MessageQueue(settings.JOB_QUEUE_STR, host=settings.REDIS_HOST, port=settings.REDIS_PORT)


class JobThread(threading.Thread):
    """Runs a scheduled job's launch command in a subprocess, then records
    the job's final status and releases its GPU resources.
    """

    def __init__(self, job, cmd, callback=None):
        """
        Args:
            job: JobInfo being launched.
            cmd (list): argv for the subprocess (the mpirun command line).
            callback: optional callable invoked with *job* after the
                subprocess exits and its status has been recorded.
        """
        super().__init__()
        self.job = job
        self.cmd = cmd
        self.callback = callback

    def run(self):
        # Block until the launched command finishes; capture stderr for
        # diagnostics on failure.
        p = subprocess.Popen(self.cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        output, error = p.communicate()

        job_id = self.job.get_id()
        sj = scheduling_job_table.get(job_id)
        task_status = sj.get_task_status()

        if p.returncode != 0 or task_status == JobTaskStatus.EXCEPTION:
            # Fixed typo in message: "excpetion" -> "exception".
            print('-----------------job finished: %s with exception: %s' % (sj, error.strip()))
            profiled_job_table.update_status(job_id, JobStatus.EXCEPTION, exception=error)
        elif task_status == JobTaskStatus.MIGRATION:
            # The job checkpointed itself for migration: persist the
            # checkpoint path so it can be resumed later.
            profiled_job_table.update_status(job_id, JobStatus.MIGRATION, checkpoint_path=sj._checkpoint_path)
            utils.update_yaml_attr(self.job.get_config_file(), 'checkpoint_path', sj._checkpoint_path)
        else:
            # Normal exit.
            profiled_job_table.update_status(job_id, JobStatus.FINISHED)

        if self.callback is not None:
            self.callback(self.job)

        # Release GPU resources. Using a context manager guarantees the lock
        # is released even if a cluster/table call raises.
        with resource_lock:
            node_ids = self.job.get_node_ids()
            gpu_ids = self.job.get_gpu_ids()
            # node ids are not needed for the release itself; zip keeps the
            # pairing (and truncation) semantics of the original code.
            for _nid, gid in zip(node_ids, gpu_ids):
                gpu = clust.get_gpu(gid)
                if gpu:
                    gpu.remove_job(self.job)

            scheduling_job_table.delete(job_id)


def launch(job, hosts_fn, gpus_fn):
    """Start *job* under MPI using the generated hostfile, in a JobThread.

    Args:
        job: JobInfo to launch.
        hosts_fn (str): path of the MPI hostfile.
        gpus_fn (str): path of the GPU-index file (not used here; the
            trainer reads it via its own configuration).
    """
    code_dir = job._code_dir
    mpi_cmd = settings.MPI_PREFIX + [
        '-np', '%d' % (job._ngpus),
        '-hostfile', '%s' % (hosts_fn),
        os.path.join(settings.PYTHON_HOME, 'python'),
        '%s/main.py' % (code_dir),
        '--yaml', job._conf,
    ]
    job.activate()
    JobThread(job, mpi_cmd).start()

def get_job_from_queue():
    r"""Continuously move newly profiled jobs into the shared job pool.

    Polls the profiled-job table once per second, enqueues every waiting
    job, and marks it INPOOL so it is not fetched again. Runs forever.
    """
    while True:
        waiting = profiled_job_table.query_waiting_jobs()
        if waiting:
            print('Current # of jobs waiting for resource allocating: ', len(waiting))
        for job in waiting:
            with pool_lock:
                job_pool.put(job)
                print('put job:', job)
            profiled_job_table.update_status(job._job_id, JobStatus.INPOOL)
        time.sleep(1)


def pick_job_from_pool():
    r"""Return the next job from the waiting pool (FIFO), or None if empty."""
    picked = None
    with pool_lock:
        if not job_pool.empty():
            picked = job_pool.get()
            print('job_info: ', picked)
    return picked


class ResourceAllocatorBase():
    """Base class for resource allocators.

    Owns the cluster view, starts the background job fetcher, and provides
    the helpers shared by all allocation policies.
    """

    def __init__(self, cluster):
        self._cluster = cluster
        # Background thread that moves newly profiled jobs into the pool.
        self._job_fetcher = threading.Thread(target=get_job_from_queue)
        self._job_fetcher.start()

    def config_hosts_and_gpus(self, job, selected_gpus):
        """Record the GPU assignment on *job* and on the cluster.

        Args:
            job: the JobInfo being placed.
            selected_gpus: GPUs chosen by the policy.

        Returns:
            (hosts, gpu_ids): mapping node name -> slot count, and the list
            of assigned GPU ids in the order of *selected_gpus*.
        """
        # Removed unused local `clust = self._cluster` that shadowed the
        # module-level global of the same name.
        hosts = {}
        gpu_ids = []
        for gpu in selected_gpus:
            hosts[gpu._node_name] = hosts.get(gpu._node_name, 0) + 1
            gpu_ids.append(gpu.get_gpu_id())

            # Cluster-side bookkeeping is shared state; guard it.
            with resource_lock:
                gpu.add_job(job)

            job.add_gpu(gpu)

        return hosts, gpu_ids

    def generate_configs(self, job, hosts, gpu_ids):
        """Write the MPI hostfile and the GPU-index file for *job*.

        Returns:
            (hosts_fn, gpus_fn): paths of the two generated files.
        """
        if not os.path.exists(settings.CONFIG_FOLDER):
            os.makedirs(settings.CONFIG_FOLDER)

        hosts_fn = '%s/%s_%s_hosts' % (settings.CONFIG_FOLDER, job._user_id, job._job_id)
        with open(hosts_fn, 'w') as f:
            for host in hosts:
                f.write("%s slots=%d\n" % (host, hosts[host]))

        gpus_fn = '%s/%s_%s_gpus' % (settings.CONFIG_FOLDER, job._user_id, job._job_id)
        with open(gpus_fn, 'w') as f:
            for gid in gpu_ids:
                # One per-node GPU index per line, matching gpu_ids order.
                gpu = self._cluster.get_gpu(gid)
                idx = gpu.get_gpu_idx()
                f.write("%s\n" % str(idx))

        return hosts_fn, gpus_fn

    def update_cluster_status(self):
        """Refresh the cluster's resource view from the scheduling table."""
        scheduling_jobs = scheduling_job_table.query()
        self._cluster.update_cluster_resources(scheduling_jobs)

    def allocate(self, job):
        """Policy hook implemented by subclasses; should return
        (hosts_fn, gpus_fn) on success or (None, None) otherwise."""
        pass


class ResourceAllocatorGreedy(ResourceAllocatorBase):
    """Greedy policy: place jobs as soon as enough GPUs are available,
    allowing up to two jobs to share a GPU."""

    def select_gpus(self, job, gpu_list):
        """Pick job._ngpus GPUs, visiting lightly loaded nodes first and
        preferring completely idle GPUs; returns [] when not enough fit."""
        with resource_lock:
            # Nodes with the smallest workload (and the closest GPU-count
            # match) are considered first.
            ordered_nodes = sorted(
                self._cluster.get_nodes(),
                key=lambda n: (n.workload, abs(n._ngpus - job._ngpus)))

            candidates = []
            for node in ordered_nodes:
                for gpu in node.get_gpus():
                    # Allow at most two co-located jobs per GPU.
                    if len(gpu.get_jobs()) < 2:
                        candidates.append(gpu)

            # Completely unused GPUs take priority; fall back to the full
            # candidate list if there are not enough of them.
            idle = [g for g in candidates if g.get_allocated_mem() == 0]
            preferred = idle if len(idle) >= job._ngpus else candidates

        if len(preferred) >= job._ngpus:
            return preferred[:job._ngpus]
        return []

    def allocate(self, job):
        """Try to place *job*; returns (hosts_fn, gpus_fn) or (None, None)."""
        conf_path = os.path.join(job._code_dir, job._conf)
        with open(conf_path, 'r') as f:
            config = yaml.load(f, Loader=yaml.FullLoader)
        # NOTE(review): parsed but currently unused — job._ngpus drives the
        # selection below; kept for its side effect of validating the config.
        requested_ngpus = config['optimizer'].get('ngpus', 1)

        chosen = self.select_gpus(job, self._cluster.get_gpus())
        if len(chosen) == 0:
            return None, None
        hosts, gpu_ids = self.config_hosts_and_gpus(job, chosen)
        return self.generate_configs(job, hosts, gpu_ids)

    def run(self):
        """Main scheduling loop: FIFO over the shared job pool."""
        while True:
            # Refresh the resource view, then try to place one job.
            self.update_cluster_status()
            job_info = pick_job_from_pool()
            if job_info:
                hosts_fn, gpus_fn = self.allocate(job_info)
                if hosts_fn and gpus_fn:
                    job_info._status = JobStatus.SCHEDULING
                    scheduling_job_table.insert(job_info)
                    launch(job_info, hosts_fn, gpus_fn)
                    profiled_job_table.update_status(job_info._job_id, JobStatus.SCHEDULING)
                else:
                    # No resources yet: put it back for the next round.
                    with pool_lock:
                        job_pool.put(job_info)
            time.sleep(1)


class ResourceAllocatorSRTF(ResourceAllocatorGreedy):
    """Shortest-remaining-time-first policy: schedules jobs in increasing
    order of estimated remaining time, on exclusive GPUs only."""

    def select_gpus(self, job, gpu_list):
        """Pick job._ngpus exclusive GPUs with enough free memory;
        returns [] when not enough are available."""
        with resource_lock:
            ordered_nodes = sorted(
                self._cluster.get_nodes(),
                key=lambda n: (n.workload, abs(n._ngpus - job._ngpus)))

            candidates = []
            for node in ordered_nodes:
                for gpu in node.get_gpus():
                    # Require enough free memory and no co-located job.
                    if gpu.get_free_mem() >= job.get_occupied_mem() and len(gpu.get_jobs()) == 0:
                        candidates.append(gpu)

            # Completely unused GPUs take priority.
            idle = [g for g in candidates if g.get_allocated_mem() == 0]
            preferred = idle if len(idle) >= job._ngpus else candidates

        if len(preferred) >= job._ngpus:
            return preferred[:job._ngpus]
        return []

    def run(self):
        """Main loop: drain the pool, schedule by remaining time, requeue
        the jobs that did not fit."""
        while True:
            self.update_cluster_status()

            # Snapshot every pending job under the pool lock.
            pending = []
            with pool_lock:
                while not job_pool.empty():
                    pending.append(job_pool.get())

            pending.sort(key=lambda job: job.get_left_time())
            unscheduled = []
            for job_info in pending:
                hosts_fn, gpus_fn = self.allocate(job_info)
                if hosts_fn and gpus_fn:
                    job_info._status = JobStatus.SCHEDULING
                    scheduling_job_table.insert(job_info)
                    launch(job_info, hosts_fn, gpus_fn)
                    profiled_job_table.update_status(job_info._job_id, JobStatus.SCHEDULING)
                else:
                    unscheduled.append(job_info)

            print('# of unscheduled jobs: ', len(unscheduled))
            if len(unscheduled) > 0:
                print(unscheduled[0])

            # Put the leftovers back for the next round.
            with pool_lock:
                for job_info in unscheduled:
                    job_pool.put(job_info)

            time.sleep(1)

class ResourceAllocatorSF(ResourceAllocatorSRTF):
    """Smallest-first policy: like SRTF but orders jobs by requested GPU
    count instead of remaining time."""

    def run(self):
        """Main loop: drain the pool, schedule smallest jobs first, and
        requeue the ones that did not fit."""
        while True:
            self.update_cluster_status()

            # Snapshot every pending job under the pool lock.
            pending = []
            with pool_lock:
                while not job_pool.empty():
                    pending.append(job_pool.get())

            pending.sort(key=lambda job: job.get_ngpus())
            unscheduled = []
            for job_info in pending:
                hosts_fn, gpus_fn = self.allocate(job_info)
                if hosts_fn and gpus_fn:
                    job_info._status = JobStatus.SCHEDULING
                    scheduling_job_table.insert(job_info)
                    launch(job_info, hosts_fn, gpus_fn)
                    profiled_job_table.update_status(job_info._job_id, JobStatus.SCHEDULING)
                else:
                    unscheduled.append(job_info)

            print('# of unscheduled jobs: ', len(unscheduled))
            if len(unscheduled) > 0:
                print(unscheduled[0])

            # Put the leftovers back for the next round.
            with pool_lock:
                for job_info in unscheduled:
                    job_pool.put(job_info)

            time.sleep(1)


class ResourceAllocatorSRSF(ResourceAllocatorSRTF):
    """Shortest-remaining-service-first: orders jobs by remaining time
    multiplied by requested GPU count (remaining GPU-time)."""

    def run(self):
        """Main loop: drain the pool, schedule by remaining GPU-time, and
        requeue the jobs that did not fit."""
        while True:
            self.update_cluster_status()

            # Snapshot every pending job under the pool lock.
            pending = []
            with pool_lock:
                while not job_pool.empty():
                    pending.append(job_pool.get())

            pending.sort(key=lambda job: job.get_left_time() * job.get_ngpus())
            unscheduled = []
            for job_info in pending:
                hosts_fn, gpus_fn = self.allocate(job_info)
                if hosts_fn and gpus_fn:
                    job_info._status = JobStatus.SCHEDULING
                    scheduling_job_table.insert(job_info)
                    launch(job_info, hosts_fn, gpus_fn)
                    profiled_job_table.update_status(job_info._job_id, JobStatus.SCHEDULING)
                else:
                    unscheduled.append(job_info)

            print('# of unscheduled jobs: ', len(unscheduled))
            if len(unscheduled) > 0:
                print(unscheduled[0])

            # Put the leftovers back for the next round.
            with pool_lock:
                for job_info in unscheduled:
                    job_pool.put(job_info)

            time.sleep(1)


class ResourceAllocatorTirasias(ResourceAllocatorSRTF):
    """Tiresias-like two-queue scheduler.

    queue0 holds freshly submitted pending jobs (high priority); queue1
    holds jobs whose accumulated GPU-time exceeded a threshold — they are
    migration candidates or migrated jobs waiting to be relaunched.
    Queue entries are (job_id, enqueue_timestamp) tuples.
    """

    def migrating_job(self, job):
        """Mark a running job MIGRATING so its trainer checkpoints and exits."""
        scheduling_job_table.update_job_status(job.get_id(), JobStatus.MIGRATING)

    def _find_index(self, queue, job_id):
        """Return the position of *job_id* in a [(job_id, ts), ...] queue,
        or -1 if absent."""
        for i, (jid, t) in enumerate(queue):
            if job_id == jid:
                return i
        return -1

    def _del_with_indexes(self, l, indexes):
        """Remove the given positions from list *l* in place; *indexes*
        may be in any order (removed highest-first so they stay valid)."""
        for i in sorted(indexes, reverse=True):
            l.pop(i)

    def run(self):
        """Main scheduling loop; logs only when the state changes."""
        self._queue0 = []  # Pending jobs from submission
        self._queue1 = []  # Pending jobs from migration
        self._all_jobs = {}
        all_jobs = self._all_jobs
        queue0 = self._queue0
        queue1 = self._queue1
        running_time_thres = 60 * 8  # GPU-time (seconds) before demotion
        solve_starvation = 1  # promote after waiting > runtime * this factor

        logline = ""
        while True:
            newline = ""
            # Fetch from Database to get the current waiting jobs.
            self.update_cluster_status()
            newline += self._cluster.get_resources_info()
            newline += '\n'

            # Pull newly submitted jobs into queue0.
            pool_lock.acquire()
            while not job_pool.empty():
                j = job_pool.get()
                queue0.append((j.get_id(), time.time()))
                all_jobs[j.get_id()] = j
            pool_lock.release()

            running_jobs = scheduling_job_table.query()

            # Demote long-running jobs: they become migration candidates.
            for rj in running_jobs:
                all_jobs[rj.get_id()] = rj
                if rj.get_status() == JobStatus.SCHEDULING:
                    gputime = rj.get_accumulate_running_time() * rj._ngpus
                    if gputime >= running_time_thres:
                        newline += 'Job: %s should be migrated.\n' % rj
                        queue1.append((rj.get_id(), time.time()))

            # Starvation handling: promote jobs that waited in queue1 longer
            # than their accumulated runtime times the factor. Iterate over a
            # snapshot because entries are popped from queue1 inside the loop
            # (fixes mutate-while-iterating, which skipped entries).
            for pj_id, t in list(queue1):
                pj = all_jobs[pj_id]
                if pj.get_status() != JobStatus.SCHEDULING:  # pending job
                    pending_time = time.time() - t
                    # BUG FIX: was `self._solve_starvation`, an attribute that
                    # is never assigned (AttributeError at runtime); the value
                    # lives in the local `solve_starvation` above.
                    if solve_starvation > 0 and pending_time >= pj.get_accumulate_running_time() * solve_starvation:
                        idx = self._find_index(queue1, pj.get_id())
                        queue1.pop(idx)
                        queue0.append((pj.get_id(), time.time()))

            # Schedule queue0 first (higher priority).
            waiting_jobs = []
            poped_indexes = []
            for i, (pj, t) in enumerate(queue0):
                job_info = all_jobs[pj]
                hosts_fn, gpus_fn = self.allocate(job_info)
                if hosts_fn and gpus_fn:
                    job_info._status = JobStatus.SCHEDULING
                    scheduling_job_table.insert(job_info)
                    launch(job_info, hosts_fn, gpus_fn)
                    profiled_job_table.update_status(job_info._job_id, JobStatus.SCHEDULING)
                    poped_indexes.append(i)
                else:
                    waiting_jobs.append(job_info)
            self._del_with_indexes(queue0, poped_indexes)

            # Then queue1: request migration of running candidates only when
            # some job is actually waiting for resources; try to relaunch
            # already-migrated (non-running) entries.
            poped_indexes = []
            for i, (pj, t) in enumerate(queue1):
                job_info = all_jobs[pj]
                if job_info.get_status() == JobStatus.SCHEDULING:
                    if len(waiting_jobs) > 0:
                        newline += 'Job: %s is being migrated.\n' % job_info.get_id()
                        self.migrating_job(job_info)
                    poped_indexes.append(i)
                else:
                    hosts_fn, gpus_fn = self.allocate(job_info)
                    if hosts_fn and gpus_fn:
                        job_info._status = JobStatus.SCHEDULING
                        scheduling_job_table.insert(job_info)
                        launch(job_info, hosts_fn, gpus_fn)
                        profiled_job_table.update_status(job_info._job_id, JobStatus.SCHEDULING)
                        poped_indexes.append(i)
                    else:
                        waiting_jobs.append(job_info)
            self._del_with_indexes(queue1, poped_indexes)

            newline += 'Q0: %s.\n' % queue0
            newline += 'Q1: %s.\n' % queue1
            newline += '# of unscheduled jobs: %d.' % len(waiting_jobs)
            # Only print when the composed status text actually changed.
            if newline != logline:
                logline = newline
                print(datetime.datetime.now())
                print(logline)

            time.sleep(10)


if __name__ == '__main__':

    # The Tiresias-style allocator is the active entry point; the greedy
    # variant is kept here for quick switching during experiments.
    #ra = ResourceAllocatorGreedy(clust)
    ra = ResourceAllocatorTirasias(clust)
    ra.run()  # blocks forever in the scheduling loop
