import redis
import pickle
import time
import threading
import random
import argparse, os
import ddl_platform.common.settings as settings

from queue import Queue
from ddl_platform.ddlib.communicator import CommMsg
from ddl_platform.database.scheduling_job_table import scheduling_job_table
from ddl_platform.ddlib.job import JobTaskStatus
from ddl_platform.ddlib.job import JobStatus, JobTaskStatus

from ddl_platform.scheduler.cluster import Cluster

INTERVAL = 10  # seconds between scheduling rounds for interval-based schedulers


class SchedulerBase():
    """Base scheduler: polls the job table and forwards worker status transitions.

    Subclasses override ``run`` to implement a concrete scheduling policy.
    All schedulers talk to trainers by publishing pickled command dicts on a
    per-job pub/sub channel.
    """

    def __init__(self, table, tunel, cluster=None):
        self.table = table      # job-table store (query / update_worker_status)
        self.tunel = tunel      # pub/sub handle (e.g. a redis client)
        self.cluster = cluster  # optional Cluster used for resource accounting
        self.name = 'scheduler_base'

    def get_jobs(self):
        """Return all jobs from the table, earliest arrival first."""
        jobs = self.table.query()
        jobs.sort(key=lambda x: x._arrival_time)
        return jobs

    def publish_msg_to_job(self, job, msg):
        """Publish ``msg`` on the scheduler->trainer channel of ``job``."""
        channel = '/channel/scheduler-to-trainer/%s' % str(job._job_id)
        self.tunel.publish(channel, msg)

    def run(self):
        """Busy loop: advance every waiting worker and ping its trainer."""
        # Maps each observed "waiting" status to the next active status.
        transitions = {
            JobTaskStatus.BEGINING: JobTaskStatus.COMPUTING,
            JobTaskStatus.COMPUTATION_WAITING: JobTaskStatus.COMPUTING,
            JobTaskStatus.COMMUNICATION_WAITING: JobTaskStatus.COMMUNICATING,
            JobTaskStatus.FINISHED: JobTaskStatus.END,
        }
        while True:
            for job in self.get_jobs():
                for i, worker_status in enumerate(job._worker_statuses):
                    nxt = transitions.get(worker_status)
                    if nxt is None:
                        continue  # already COMPUTING/COMMUNICATING etc.
                    self.table.update_worker_status(job._job_id, i, nxt)
                    # NOTE: 'wait_iterval' (sic) is the key trainers expect;
                    # do not correct the spelling without updating trainers.
                    self.publish_msg_to_job(job, pickle.dumps({
                        'wait_iterval': [100, 0, 10, 1000, -1],
                        'updated_bs': 32,
                        'interval': 1 * 60,
                        '_timestamp': time.time(),
                    }))
            time.sleep(0.001)


class SchedulerCommAvoid(SchedulerBase):
    """Scheduler that serializes communication phases on shared nodes.

    A worker may only enter its communication phase when the communication
    "lock" on the job's nodes is available; the lock is taken once every
    worker of the job is waiting for (or already doing) communication, and
    released when a worker returns to computing or finishes.
    """

    def run(self):
        """Busy loop coordinating compute/communicate phases per worker."""
        while True:
            for job in self.get_jobs():
                n_comm_waiting = 0    # workers blocked waiting to communicate
                n_communicating = 0   # workers currently communicating
                for i, status in enumerate(job._worker_statuses):
                    if status == JobTaskStatus.BEGINING:
                        self.table.update_worker_status(job._job_id, i, JobTaskStatus.COMPUTING)
                        self.publish_msg_to_job(job, CommMsg.CONTINUE)
                    elif status == JobTaskStatus.COMPUTATION_WAITING:
                        # Worker is moving back to compute: release the comm
                        # lock it (or its job) held on these nodes.
                        self.cluster.free_comm(job.get_node_ids())
                        self.table.update_worker_status(job._job_id, i, JobTaskStatus.COMPUTING)
                        self.publish_msg_to_job(job, CommMsg.CONTINUE)
                    elif status == JobTaskStatus.COMMUNICATION_WAITING:
                        n_comm_waiting += 1
                        # Only let the worker communicate if the nodes' comm
                        # lock is free (avoids contending all-reduces).
                        if self.cluster.check_comm(job.get_node_ids()):
                            self.table.update_worker_status(job._job_id, i, JobTaskStatus.COMMUNICATING)
                            self.publish_msg_to_job(job, CommMsg.CONTINUE)
                    elif status == JobTaskStatus.COMMUNICATING:
                        n_communicating += 1
                    elif status == JobTaskStatus.FINISHED:
                        self.cluster.free_comm(job.get_node_ids())
                        self.table.update_worker_status(job._job_id, i, JobTaskStatus.END)
                        self.publish_msg_to_job(job, CommMsg.CONTINUE)
                # Every worker is in (or waiting on) communication: grab the
                # comm lock so no other job starts communicating on the nodes.
                if n_comm_waiting + n_communicating == len(job._worker_statuses):
                    self.cluster.lock_comm(job.get_node_ids())
            time.sleep(1e-6)


class SchedulerFIFO(SchedulerBase):
    """First-in-first-out scheduler.

    Each round, grants GPUs in arrival order: a job is controlled only if
    none of its GPUs has already been claimed by an earlier-arrived job.
    """

    def __init__(self, table, tunel, cluster=None):
        super().__init__(table, tunel, cluster)
        self.name = 'FIFO'

    def run(self):
        """Periodically pick non-overlapping jobs in arrival order and ping them."""
        interval = INTERVAL
        while True:
            jobs = self.get_jobs()
            print('current determine: ', jobs)
            if jobs:
                candidate_jobs = set()
                occupied_gpus = set()
                # FIFO order (get_jobs already sorts; kept defensively).
                jobs.sort(key=lambda x: x._arrival_time)
                for j in jobs:
                    gpu_ids = j.get_gpu_ids()
                    # Earlier jobs win: schedule j only if all its GPUs are free.
                    if occupied_gpus.isdisjoint(gpu_ids):
                        candidate_jobs.add(j)
                    occupied_gpus.update(gpu_ids)

                print('Controlled jobs: ', candidate_jobs)
                for job in candidate_jobs:
                    # NOTE: 'wait_iterval' (sic) is the protocol key trainers expect.
                    self.publish_msg_to_job(job, pickle.dumps({
                        'wait_iterval': [100, 0, 10, 1000, -1],
                        'updated_bs': job._batch_size,
                        'interval': interval,
                        '_timestamp': time.time(),
                    }))
            time.sleep(interval)


class SchedulerSRSF(SchedulerBase):
    """Shortest-Remaining-Service-First scheduler for GPU-sharing jobs.

    When two jobs share a GPU, the one with the smaller remaining time is
    scheduled this round and the other is excluded.  Jobs that share no
    GPU with anyone are always scheduled.
    """

    def get_shared_gpus(self, job1, job2):
        """Return the Cluster GPU objects both jobs are placed on."""
        shared_ids = set(job1.get_gpu_ids()) & set(job2.get_gpu_ids())
        return [self.cluster.get_gpu(gid) for gid in shared_ids]

    def run(self):
        interval = INTERVAL
        while True:
            all_jobs = self.get_jobs()
            print('All jobs: ', all_jobs)
            if len(all_jobs) > 0:
                gpu_jobs = {}            # gpu_id -> jobs placed on it
                candidate_jobs = {}      # job -> batch size to command
                excluded_jobs = set()    # jobs paused this round
                candidate_pair_jobs = set()
                candidate_single_jobs = set()
                share_gpu_jobs = set()   # jobs that share any GPU

                for j in all_jobs:
                    for gpu_id in j.get_gpu_ids():
                        gpu_jobs.setdefault(gpu_id, []).append(j)
                        if len(gpu_jobs[gpu_id]) > 1:
                            share_gpu_jobs.update(gpu_jobs[gpu_id])

                for gpu_id, placed in gpu_jobs.items():
                    if len(placed) > 1:
                        placed.sort(key=lambda x: x._arrival_time)
                        candidate_pair_jobs.add(tuple(placed))
                    elif len(placed) == 1 and placed[0] not in share_gpu_jobs:
                        candidate_single_jobs.add(placed[0])

                for pair in candidate_pair_jobs:
                    # Only the two earliest-arrived sharers are arbitrated.
                    job1, job2 = pair[0], pair[1]
                    if job1 in candidate_jobs or job2 in candidate_jobs:
                        # A previous pair already decided one of them; the
                        # undecided partner loses its GPU this round.
                        if job1 in candidate_jobs and job2 not in candidate_jobs:
                            excluded_jobs.add(job2)
                        elif job1 not in candidate_jobs and job2 in candidate_jobs:
                            excluded_jobs.add(job1)
                    else:
                        if job1 in excluded_jobs and job2 in excluded_jobs:
                            continue
                        elif job1 in excluded_jobs:
                            candidate_jobs[job2] = job2.get_updated_bs()
                        elif job2 in excluded_jobs:
                            candidate_jobs[job1] = job1.get_updated_bs()
                        else:
                            # SRSF: the job with less remaining time wins.
                            if job1.get_left_time() < job2.get_left_time():
                                candidate_jobs[job1] = job1.get_updated_bs()
                                excluded_jobs.add(job2)
                            else:
                                candidate_jobs[job2] = job2.get_updated_bs()
                                excluded_jobs.add(job1)

                for job in candidate_single_jobs:
                    if job not in candidate_jobs and job not in excluded_jobs:
                        # NOTE(review): singles use get_bs() while pairs use
                        # get_updated_bs() — looks intentional, but confirm.
                        candidate_jobs[job] = job.get_bs()

                print('Controlled jobs: ', candidate_jobs.keys())
                for cj, bs in candidate_jobs.items():
                    migration = cj.get_status() == JobStatus.MIGRATING
                    cmd = {
                        'wait_iterval': [100, 0, 10, 1000, -1],  # (sic) protocol key
                        'interval': interval,
                        '_timestamp': time.time(),
                        'migration': migration,
                        'updated_bs': bs,
                    }
                    self.publish_msg_to_job(cj, pickle.dumps(cmd))
            time.sleep(interval)


class SchedulerInterval(SchedulerBase):
    """Trivial scheduler: unconditionally pings every job each interval."""

    def run(self):
        """Every INTERVAL seconds, send each job its standing command dict."""
        interval = INTERVAL
        while True:
            current = self.get_jobs()
            print('Controlled jobs: ', current)
            for job in current:
                is_migrating = job.get_status() == JobStatus.MIGRATING
                # Key order matches the original payload exactly.
                payload = {
                    'wait_iterval': [100, 0, 10, 1000, -1],
                    'updated_bs': job._batch_size,
                    'interval': interval,
                    '_timestamp': time.time(),
                    'migration': is_migrating,
                }
                self.publish_msg_to_job(job, pickle.dumps(payload))
            time.sleep(interval)


class SchedulerDynamicBS(SchedulerBase):
    """Scheduler that shrinks batch sizes so GPU-sharing jobs fit in memory."""

    def determine_bs_for_two_jobs(self, job1, job2, gpu):
        """Pick batch sizes for two jobs sharing ``gpu``.

        Halves batch sizes (job2's first, then job1's) until the combined
        memory footprint fits in 75% of the GPU.  Returns ``(bs1, bs2)``;
        a ``None`` means that job should be excluded this round.
        """
        # Only budget 75% of the GPU so some headroom remains.
        total_mem = gpu.get_total_mem() * 0.75
        job1_avail_bs_mems = job1.get_avail_bs_mems()  # bs -> memory footprint
        job2_avail_bs_mems = job2.get_avail_bs_mems()
        job1cbs = job1.get_updated_bs()
        job2cbs = job2.get_updated_bs()
        # Unknown batch sizes are assumed to need the whole budget.
        job1mem = job1_avail_bs_mems.get(job1cbs, total_mem)
        job2mem = job2_avail_bs_mems.get(job2cbs, total_mem)
        if job1mem + job2mem < total_mem:
            return job1cbs, job2cbs
        # BUGFIX: found_pair is now initialized so it is bound even when the
        # outer loop body never runs (job1cbs < 1).
        found_pair = False
        targetbs1 = job1cbs
        targetbs2 = job2cbs
        while targetbs1 >= 1:
            found_pair = False
            job1mem = job1_avail_bs_mems.get(targetbs1, total_mem)
            if job1mem == -1:
                # BUGFIX: the original `continue`d without halving targetbs1,
                # spinning forever on a -1 (infeasible) entry.
                targetbs1 //= 2
                continue
            targetbs2 = job2cbs
            while targetbs2 >= 1:
                job2mem = job2_avail_bs_mems.get(targetbs2, total_mem)
                if job2mem == -1:
                    targetbs2 //= 2  # BUGFIX: same missing decrement as above
                    continue
                if job2mem + job1mem < total_mem:
                    found_pair = True
                    break
                targetbs2 //= 2
            if found_pair:
                break
            targetbs1 //= 2
        if found_pair:
            if targetbs1 < job1cbs:
                # NOTE(review): when job1 had to shrink, job2 is excluded
                # (None) instead of receiving targetbs2 — looks intentional,
                # but confirm against the trainer protocol.
                return targetbs1, None
            else:
                return targetbs1, targetbs2
        return job1cbs, None

    def get_shared_gpus(self, job1, job2):
        """Return the Cluster GPU objects both jobs are placed on."""
        shared_ids = set(job1.get_gpu_ids()) & set(job2.get_gpu_ids())
        return [self.cluster.get_gpu(gid) for gid in shared_ids]

    def run(self):
        interval = INTERVAL
        while True:
            all_jobs = self.get_jobs()
            print('All jobs: ', all_jobs)
            if len(all_jobs) > 0:
                gpu_jobs = {}            # gpu_id -> jobs placed on it
                candidate_jobs = {}      # job -> batch size to command
                excluded_jobs = set()    # jobs paused this round
                candidate_pair_jobs = set()
                candidate_single_jobs = set()
                share_gpu_jobs = set()   # jobs that share any GPU

                for j in all_jobs:
                    for gpu_id in j.get_gpu_ids():
                        gpu_jobs.setdefault(gpu_id, []).append(j)
                        if len(gpu_jobs[gpu_id]) > 1:
                            share_gpu_jobs.update(gpu_jobs[gpu_id])

                for gpu_id, placed in gpu_jobs.items():
                    if len(placed) > 1:
                        placed.sort(key=lambda x: x._arrival_time)
                        candidate_pair_jobs.add(tuple(placed))
                    elif len(placed) == 1 and placed[0] not in share_gpu_jobs:
                        candidate_single_jobs.add(placed[0])

                for pair in candidate_pair_jobs:
                    # Only the two earliest-arrived sharers are arbitrated.
                    job1, job2 = pair[0], pair[1]
                    if job1 in candidate_jobs or job2 in candidate_jobs:
                        # A previous pair already decided one of them.
                        if job1 in candidate_jobs and job2 not in candidate_jobs:
                            excluded_jobs.add(job2)
                        elif job1 not in candidate_jobs and job2 in candidate_jobs:
                            excluded_jobs.add(job1)
                    else:
                        if job1 in excluded_jobs and job2 in excluded_jobs:
                            continue
                        elif job1 in excluded_jobs:
                            candidate_jobs[job2] = job2.get_updated_bs()
                        elif job2 in excluded_jobs:
                            candidate_jobs[job1] = job1.get_updated_bs()
                        else:
                            gpu = self.get_shared_gpus(job1, job2)[0]
                            job1bs, job2bs = self.determine_bs_for_two_jobs(job1, job2, gpu)
                            if job1bs is not None:
                                candidate_jobs[job1] = job1bs
                            else:
                                excluded_jobs.add(job1)
                            if job2bs is not None:
                                candidate_jobs[job2] = job2bs
                            else:
                                excluded_jobs.add(job2)

                for job in candidate_single_jobs:
                    if job not in candidate_jobs and job not in excluded_jobs:
                        # NOTE(review): singles use get_bs() while pairs use
                        # get_updated_bs() — looks intentional, but confirm.
                        candidate_jobs[job] = job.get_bs()

                print('Controlled jobs: ', candidate_jobs.keys())
                for cj, bs in candidate_jobs.items():
                    # BUGFIX: was compared against JobTaskStatus.MIGRATING;
                    # sibling schedulers compare get_status() against the
                    # job-level JobStatus.MIGRATING.
                    migration = cj.get_status() == JobStatus.MIGRATING
                    cmd = {
                        'wait_iterval': [100, 0, 10, 1000, -1],  # (sic) protocol key
                        'interval': interval,
                        '_timestamp': time.time(),
                        'migration': migration,
                        'updated_bs': bs,
                    }
                    self.publish_msg_to_job(cj, pickle.dumps(cmd))
            time.sleep(interval)


def build_scheduer(table, r, clust):
    """Factory for the active scheduling policy.

    Swap the returned class to change policy: SchedulerInterval,
    SchedulerFIFO, SchedulerSRSF, or SchedulerDynamicBS.
    (Name typo kept for backward compatibility with callers.)
    """
    return SchedulerDynamicBS(table, r, clust)
