import redis
import time
import threading
import argparse, os
import ddl_platform.common.settings as settings

from queue import Queue
from ddl_platform.ddlib.communicator import CommMsg
from ddl_platform.database.scheduling_job_table import scheduling_job_table
from ddl_platform.ddlib.job import JobTaskStatus
from ddl_platform.scheduler.cluster import Cluster
from ddl_platform.ddlib.job import JobStatus, JobTaskStatus
#from scheduler_base import SchedulerBase, SchedulerCompAvoid
from scheduler_base import build_scheduler 

# Shared Redis connection: carries the scheduler -> trainer pub/sub channels.
r = redis.Redis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB_INDEX)
print(r)  # NOTE(review): debug print of the connection object — consider removing or logging.

# Gates the per-iteration prints inside the scheduler loops below.
DEBUG = False

# Cluster-wide resource bookkeeping (compute GPUs via *_comp, network links via *_comm).
clust = Cluster()
# Pending communication / computation tasks collected by TaskCollector.
comm_queue = Queue()
comp_queue = Queue()

# Locks guarding the two queues above (queue.Queue is itself thread-safe;
# these add an extra serialization layer used by the legacy schedulers).
comm_lock = threading.Lock()
comp_lock = threading.Lock()

class TaskCollector(threading.Thread):
  """Background thread that routes jobs at their phase boundaries.

  Each poll of the scheduling table:
    * a job waiting to compute has finished communicating — its network
      links are released and it is queued on ``comp_queue``;
    * a job waiting to communicate has finished computing — its GPUs are
      released and it is queued on ``comm_queue``.
  Every polled job is then published CommMsg.INQUEUE on its per-job
  channel (per the original note, inqueued jobs are skipped next time).
  """

  def __init__(self):
    super().__init__()

  def run(self):
    while True:
      for job in scheduling_job_table.query():
        if job.get_status() == JobTaskStatus.COMPUTATION_WAITING:
          # Communication phase over: give the links back, queue compute.
          clust.free_comm(job.get_node_ids())
          with comp_lock:
            comp_queue.put(job)
        elif job.get_status() == JobTaskStatus.COMMUNICATION_WAITING:
          # Computation phase over: give the GPUs back, queue comm.
          clust.free_comp(job.get_node_ids(), job.get_gpu_ids())
          with comm_lock:
            comm_queue.put(job)

        # Note: INQUEUE is published for every job, routed or not.
        r.publish('/channel/scheduler-to-trainer/%s' % str(job._job_id), CommMsg.INQUEUE)
      time.sleep(0.001)

def simple_schedule():
  """No-arbitration scheduler: any job whose current task (compute or
  communicate) is waiting is told to proceed immediately. Runs forever."""
  waiting_states = (JobTaskStatus.COMPUTATION_WAITING, JobTaskStatus.COMMUNICATION_WAITING)
  while True:
    for job in scheduling_job_table.query():
      if job._task_status in waiting_states:
        r.publish('/channel/scheduler-to-trainer/%s' % str(job._job_id), CommMsg.CONTINUE)
    time.sleep(0.001)

def comp_avoid_schedule():
  """Computation scheduler driven by ``comp_queue``.

  Pops a job; if its GPUs pass ``clust.check_comp`` they are reserved and
  the job is published CommMsg.CONTINUE on its per-job channel, otherwise
  the job is put back on the queue for a later retry. Runs forever.
  """
  while True:
    # BUG FIX: the original acquired comp_lock around comp_queue.get().
    # get() blocks while the queue is empty, so the lock was held forever,
    # deadlocking TaskCollector which needs the same lock to put().
    # queue.Queue is internally thread-safe, so get() needs no extra lock.
    job = comp_queue.get()
    if clust.check_comp(job.get_node_ids(), job.get_gpu_ids()):
      clust.lock_comp(job.get_node_ids(), job.get_gpu_ids())
      channel = '/channel/scheduler-to-trainer/%s' % str(job._job_id)
      r.publish(channel, CommMsg.CONTINUE)
    else:
      # GPUs busy: re-queue (lock kept here for symmetry with TaskCollector).
      comp_lock.acquire()
      comp_queue.put(job)
      comp_lock.release()

def comp_simple_schedule():
  """Greedy per-worker compute scheduler: every worker waiting for or
  finished with computation gets CONTINUE on its per-worker channel; the
  job's GPUs are released once all workers have finished. Runs forever."""
  while True:
    for job in scheduling_job_table.query():
      done = 0
      for idx, status in enumerate(job._worker_statuses):
        worker_channel = '/channel/scheduler-to-trainer/%s-%s' % (str(job._job_id), str(idx))
        if status == JobTaskStatus.COMPUTATION_WAITING:
          r.publish(worker_channel, CommMsg.CONTINUE)
        elif status == JobTaskStatus.COMPUTATION_FINISHED:
          r.publish(worker_channel, CommMsg.CONTINUE)
          done += 1
      if done == len(job._worker_statuses):
        # Whole job finished computing — return its GPUs to the pool.
        clust.free_comp(job.get_node_ids(), job.get_gpu_ids())
    time.sleep(0.001)


def comm_avoid_schedule():
  """Communication scheduler that admits a job's communication phase only
  when the network links on its nodes are free.

  Per polling round, for every job:
    * if every worker is COMMUNICATION_WAITING and ``clust.check_comm``
      passes, the links are reserved and each worker gets CONTINUE;
    * if every worker is COMMUNICATION_FINISHED, the links are released
      and each worker gets CONTINUE to move on.
  Runs forever.
  """
  counter = 0
  while True:
    jobs = scheduling_job_table.query()
    for job in jobs:
      print(counter, job)  # NOTE(review): unconditional debug print; siblings gate on DEBUG
      finished_counter = 0
      waiting_counter = 0
      for i, worker_status in enumerate(job._worker_statuses):
        if worker_status == JobTaskStatus.COMMUNICATION_WAITING:
          waiting_counter += 1
        elif worker_status == JobTaskStatus.COMMUNICATION_FINISHED:
          finished_counter += 1

      if waiting_counter == len(job._worker_statuses):
        if clust.check_comm(job.get_node_ids()):
          # BUG FIX: reserve the links before admitting the job. The original
          # only checked, so check_comm could never fail and free_comm below
          # released a reservation that was never taken (compare the
          # check-then-lock pattern in comp_simple_comm_avoid_schedule).
          clust.lock_comm(job.get_node_ids())
          for i, worker_status in enumerate(job._worker_statuses):
            channel = '/channel/scheduler-to-trainer/%s-%d' % (str(job._job_id), i)
            r.publish(channel, CommMsg.CONTINUE)

      if finished_counter == len(job._worker_statuses):
        clust.free_comm(job.get_node_ids())
        for i, worker_status in enumerate(job._worker_statuses):
          channel = '/channel/scheduler-to-trainer/%s-%d' % (str(job._job_id), i)
          r.publish(channel, CommMsg.CONTINUE)
    print()
    counter += 1
    time.sleep(1)

    
def comm_simple_schedule():
  """Unconditional communication scheduler: waiting communication tasks
  are resumed immediately; once a job's communication phase has finished
  its network links are released and it is resumed again. Runs forever."""
  while True:
    for job in scheduling_job_table.query():
      job_channel = '/channel/scheduler-to-trainer/%s' % str(job._job_id)
      if job.get_status() == JobTaskStatus.COMMUNICATION_WAITING:
        r.publish(job_channel, CommMsg.CONTINUE)
      elif job.get_status() == JobTaskStatus.COMMUNICATION_FINISHED:
        clust.free_comm(job.get_node_ids())
        r.publish(job_channel, CommMsg.CONTINUE)
    time.sleep(0.001)


def comp_simple_comm_avoid_schedule():
  """Comm-avoid scheduler: computation proceeds unconditionally, but a
  job's communication phase is admitted only when ``clust.check_comm``
  says its nodes' links are free (which are then reserved).

  A phase transition fires only when *all* workers of a job sit at the
  same boundary (BEGINING / COMPUTATION_WAITING / COMMUNICATION_WAITING /
  FINISHED). Worker statuses are advanced in the job table and a single
  CONTINUE is published on the per-job channel. Runs forever.
  """
  print('Running Comm-Avoid Scheduler...')
  tick = 0
  while True:
    for job in scheduling_job_table.query():
      if DEBUG:
        print(tick, job)
      statuses = job._worker_statuses
      total = len(statuses)
      n_begin = sum(1 for s in statuses if s == JobTaskStatus.BEGINING)
      n_comp_wait = sum(1 for s in statuses if s == JobTaskStatus.COMPUTATION_WAITING)
      n_comm_wait = sum(1 for s in statuses if s == JobTaskStatus.COMMUNICATION_WAITING)
      n_finished = sum(1 for s in statuses if s == JobTaskStatus.FINISHED)
      job_channel = '/channel/scheduler-to-trainer/%s' % str(job._job_id)

      # All workers at job start: mark them COMPUTING and kick off.
      if n_begin == total:
        for w in range(total):
          scheduling_job_table.update_worker_status(job._job_id, w, JobTaskStatus.COMPUTING)
        r.publish(job_channel, CommMsg.CONTINUE)

      # All workers ready to compute: release links, reserve GPUs, resume.
      if n_comp_wait == total:
        clust.free_comm(job.get_node_ids())
        clust.lock_comp(job.get_node_ids(), job.get_gpu_ids())
        for w in range(total):
          scheduling_job_table.update_worker_status(job._job_id, w, JobTaskStatus.COMPUTING)
        r.publish(job_channel, CommMsg.CONTINUE)

      # All workers ready to communicate: admit only if the links are free.
      if n_comm_wait == total:
        if clust.check_comm(job.get_node_ids()):
          clust.lock_comm(job.get_node_ids())
          for w in range(total):
            scheduling_job_table.update_worker_status(job._job_id, w, JobTaskStatus.COMMUNICATING)
          r.publish(job_channel, CommMsg.CONTINUE)

      # All workers finished: release links, mark END, final CONTINUE.
      if n_finished == total:
        clust.free_comm(job.get_node_ids())
        for w in range(total):
          scheduling_job_table.update_worker_status(job._job_id, w, JobTaskStatus.END)
        r.publish(job_channel, CommMsg.CONTINUE)
    if DEBUG:
      print()
    tick += 1
    time.sleep(0.001)


def comp_simple_comm_simple_schedule():
  """Baseline scheduler: both phases proceed unconditionally once all
  workers of a job reach the same boundary.

  Differs from comp_simple_comm_avoid_schedule in that the
  clust.check_comm() gate is commented out (communication is admitted
  unconditionally, though lock_comm is still taken) and the poll interval
  is 1e-6 s instead of 0.001 s. Runs forever.
  """
  print('Running Simple Scheduler...')
  counter = 0
  while True:
    jobs = scheduling_job_table.query()

    for job in jobs:
      if DEBUG:
        print(counter, job)
      # Tally how many workers sit at each phase boundary; a transition
      # fires only when *all* workers of the job agree.
      beginning_counter = 0
      comp_waiting_counter = 0
      comm_waiting_counter = 0
      finished_counter = 0
      for i, worker_status in enumerate(job._worker_statuses):
        # NOTE(review): this per-worker `channel` is never used; the
        # per-job channel computed below is what gets published to.
        channel = '/channel/scheduler-to-trainer/%s-%d' % (str(job._job_id), i)
        if worker_status == JobTaskStatus.BEGINING:
          beginning_counter += 1
        if worker_status == JobTaskStatus.COMPUTATION_WAITING:
          comp_waiting_counter += 1
        elif worker_status == JobTaskStatus.COMMUNICATION_WAITING:
          comm_waiting_counter += 1
        elif worker_status == JobTaskStatus.FINISHED:
          finished_counter += 1

      # All workers at job start: mark them COMPUTING and kick off.
      if beginning_counter == len(job._worker_statuses):
        channel = '/channel/scheduler-to-trainer/%s' % str(job._job_id)
        for i, worker_status in enumerate(job._worker_statuses):
          scheduling_job_table.update_worker_status(job._job_id, i, JobTaskStatus.COMPUTING)
        r.publish(channel, CommMsg.CONTINUE)

      # All workers ready to compute: release links, reserve GPUs, resume.
      if comp_waiting_counter == len(job._worker_statuses):
        clust.free_comm(job.get_node_ids())
        clust.lock_comp(job.get_node_ids(), job.get_gpu_ids())
        channel = '/channel/scheduler-to-trainer/%s' % str(job._job_id)
        for i, worker_status in enumerate(job._worker_statuses):
          scheduling_job_table.update_worker_status(job._job_id, i, JobTaskStatus.COMPUTING)
        r.publish(channel, CommMsg.CONTINUE)

      # All workers ready to communicate: reserve links and resume without
      # checking availability ("simple" communication policy).
      if comm_waiting_counter == len(job._worker_statuses):
        #if clust.check_comm(job.get_node_ids()):
        clust.lock_comm(job.get_node_ids())
        for i, worker_status in enumerate(job._worker_statuses):
          scheduling_job_table.update_worker_status(job._job_id, i, JobTaskStatus.COMMUNICATING)
        channel = '/channel/scheduler-to-trainer/%s' % str(job._job_id)
        r.publish(channel, CommMsg.CONTINUE)

      # All workers finished: release links, mark END, final CONTINUE.
      # NOTE(review): the GPUs reserved via lock_comp above are never freed
      # here — confirm whether free_comp happens elsewhere.
      if finished_counter == len(job._worker_statuses):
        clust.free_comm(job.get_node_ids())
        channel = '/channel/scheduler-to-trainer/%s' % str(job._job_id)
        for i, worker_status in enumerate(job._worker_statuses):
          scheduling_job_table.update_worker_status(job._job_id, i, JobTaskStatus.END)
        r.publish(channel, CommMsg.CONTINUE)
    if DEBUG:
      print()
    counter += 1
    time.sleep(1e-6)


    
if __name__ == '__main__':
    # Delegate to the pluggable scheduler factory; the legacy loop/thread
    # entry points above are retained only for reference.
    # BUG FIX: was `build_scheduer` (typo) — the name imported at the top of
    # the file is `build_scheduler`, so the script crashed with NameError
    # on startup.
    scheduler = build_scheduler(scheduling_job_table, r, clust)
    scheduler.run()
