# Copyright (c) 2017 Presto Labs Pte. Ltd.
# Author: whpark

import getpass
import json
import logging
import os
import subprocess

import paramiko

from absl import flags

FLAGS = flags.FLAGS

# Default prefix prepended to every Slurm job name created by this module
# (see SlurmJob.set_name and SlurmJobManager.get_job_info's name filter).
flags.DEFINE_string("slurm_job_name_prefix", "portfolio_operations", "job name prefix")

# Slurm login node that all job-control commands (sbatch/squeue/scancel/
# scontrol) are executed on over SSH.
flags.DEFINE_string('slurm_login_node', 'slurm19-login-01.iosg.corp.prestolabs.io', '')


class SlurmJobManager(object):
  """Submits and controls Slurm jobs via SSH to a Slurm login node.

  Wrapper scripts (run.sbatch / run_array.sbatch) are deployed into
  ``working_dir``; per-job command files go under ``command/`` and job
  output under ``output/``.  All Slurm commands (sbatch, squeue, scancel,
  scontrol) run remotely on the login node through one paramiko client.
  """

  def __init__(self, working_dir, slurm_job_name_prefix=None, slurm_login_node=None, run_as=None):
    """Opens the SSH connection to the login node.

    Args:
      working_dir: directory for deployed scripts, command files and job
        output (presumably shared with the cluster -- not verifiable here).
      slurm_job_name_prefix: job-name prefix; defaults to the
        --slurm_job_name_prefix flag.
      slurm_login_node: host to SSH to; defaults to the --slurm_login_node flag.
      run_as: optional remote username for non-bot local users.
    """
    self._logger = logging.getLogger(__name__)
    self.slurm_job_name_prefix_ = slurm_job_name_prefix or FLAGS.slurm_job_name_prefix
    self.slurm_login_node_ = slurm_login_node or FLAGS.slurm_login_node
    self.working_dir = working_dir

    self.client = paramiko.SSHClient()
    self.client.load_system_host_keys()
    self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    self.username = getpass.getuser()

    # Dedicated SSH identities for known bot accounts:
    # local user -> (key file, remote username override or None).
    bot_ssh = {
        'bot-alpha-1': ('/remote/iosg/data/etc/general_operations/ssh/bot-alpha-1/id_rsa', None),
        'bot-cache-1': ('/remote/iosg/data/etc/general_operations/ssh/bot-cache-1/id_rsa', None),
        # NOTE(review): bot-tse connects as bot-cache-1 but self.username stays
        # 'bot-tse', so get_job_info()'s USER filter may miss its jobs -- confirm.
        'bot-tse': ('/remote/iosg/data/etc/general_operations/ssh/bot-tse/id_rsa', 'bot-cache-1'),
    }
    if self.username in bot_ssh:
      key_filename, remote_user = bot_ssh[self.username]
      if remote_user is None:
        self.client.connect(self.slurm_login_node_, key_filename=key_filename)
      else:
        self.client.connect(
            self.slurm_login_node_, username=remote_user, key_filename=key_filename)
    elif run_as is None:
      self.client.connect(self.slurm_login_node_)
    else:
      self.username = run_as
      self.client.connect(self.slurm_login_node_, username=run_as)

  # return text output. caller must parse it properly.
  def exec_(self, cmd):
    """Runs cmd on the login node and returns raw stdout bytes."""
    _, stdout, _ = self.client.exec_command(cmd)
    return stdout.read()

  # return jobid
  def exec_sbatch_(self, cmd):
    """Runs an sbatch command and returns the submitted job id (int).

    Raises:
      ValueError (or whatever parsing raised): if stdout's last token is not
        an integer; the failing command and output are printed first.
    """
    _, stdout, _ = self.client.exec_command(cmd)
    output = stdout.read().decode('utf-8').strip()
    try:
      # sbatch prints e.g. "Submitted batch job 12345"; the id is last.
      job_id = int(output.split(' ')[-1])
    except Exception:
      print("cmd:", cmd)
      print("output:", output)
      raise
    return job_id

  def get_node_info(self):
    """Returns a list of (hostname, cpu_count, state) string tuples."""
    # reference: https://slurm.schedmd.com/sinfo.html
    cmd = 'sinfo -h --format="%n,%c,%T"'
    output = self.exec_(cmd).decode('utf_8')
    return [tuple(line.split(',')) for line in output.split('\n') if line]

  def get_available_nodes(self):
    """Returns hostnames of nodes in a state that can accept or run work."""
    node_info = self.get_node_info()
    # reference: https://slurm.schedmd.com/sinfo.html
    available_states = ['ALLOCATED', 'ALLOCATED+', 'COMPLETING', 'IDLE', 'MIXED']
    return [row[0] for row in node_info if row[2].upper() in available_states]

  def get_job_info(self):
    """Returns squeue rows (dicts keyed by header names) for this user's jobs.

    Only rows whose NAME starts with the configured prefix and whose USER
    matches self.username are returned.
    """
    separator = '||'
    # Build "%A||%u||%j||..." (see https://slurm.schedmd.com/squeue.html).
    # Fixed: the old '"%%" + c' survived the later %-formatting as a literal
    # '%%A' (substituted values are not re-scanned by the % operator), so
    # squeue received a bad format string.
    fmt = separator.join('%' + c for c in 'AujZTtVMBrRS')
    cmd = 'squeue -r --format="%s"' % fmt
    # Fixed: exec_ returns bytes; decode before splitting on str separators
    # (get_node_info and exec_sbatch_ already decode).
    output = self.exec_(cmd).decode('utf-8')
    result = [line.split(separator) for line in output.split('\n') if line]
    if not result:
      # Defensive: no header line means squeue produced no output at all.
      return []
    keys = result[0]
    # Header names can repeat across format codes; disambiguate with '_'.
    unique_keys = []
    for key in keys:
      while key in unique_keys:
        key += "_"
      unique_keys.append(key)
    keys = unique_keys
    rows = result[1:]
    result = [{keys[i]: row[i] for i in range(len(keys))} for row in rows]
    return [
        row for row in result
        if row['NAME'].startswith(self.slurm_job_name_prefix_) and row['USER'] == self.username
    ]

  def get_job_id(self, job):
    """Normalizes a SlurmJob, int, digit-string or None to an int id or None.

    Raises:
      ValueError: for any other representation (NOTE(review): array task ids
        such as '123_4' from squeue -r are not digits and would raise here --
        confirm whether cancel_all ever sees them).
    """
    if isinstance(job, SlurmJob):
      job_id = job.job_id()
    elif isinstance(job, int):
      job_id = job
    elif isinstance(job, str) and job.isdigit():
      job_id = int(job)
    elif job is None:
      job_id = None
    else:
      raise ValueError(job)
    return job_id

  def start_job(self, job):
    """Releases a held trigger job; returns scontrol output, or False if the
    job is not a trigger job."""
    self._logger.info("Starting %s[%s]" % (job.job_name(), job.job_id()))
    if job.is_trigger():
      cmd = "scontrol release %d" % (job.job_id())
      out = self.exec_(cmd)
      return out
    else:
      return False

  def cancel_job_(self, job):
    """Cancels one job (any form accepted by get_job_id); returns scancel output."""
    job_id = self.get_job_id(job)
    self._logger.info("Deleting %s" % job_id)

    cmd = "scancel %d" % (job_id)
    out = self.exec_(cmd)
    return out

  def cancel_all(self):
    """Cancels every job of this user matching the configured name prefix."""
    jobs = [job['JOBID'] for job in self.get_job_info()]
    for job in jobs:
      self.cancel_job_(job)

  def add_job_(self, job, deploy_only=False):
    """Writes the job's command file(s) and submits it with sbatch.

    Args:
      job: a SlurmJob or SlurmArrayJob.  Every submitted non-array job must
        be exactly one of trigger (submitted with --hold) or dependent
        (submitted with --dependency) -- enforced by the assert below.
      deploy_only: if True, only write the command files; do not submit.

    Returns:
      The new job id, or None when deploy_only is set.
    """
    self._logger.info("Adding %s" % job.job_name())
    if not deploy_only:
      assert job.trigger_job != job.dependent_job, job.job_name()
      assert job.job_id() is None

    if isinstance(job, SlurmArrayJob):
      # Each child gets its own command file; the array job's file lists
      # those filenames, one per line (presumably indexed by the array task
      # id inside run_array.sbatch -- that script is not visible here).
      for child_job in job.jobs:
        cmd_file = "command/%s" % child_job.job_name()
        with open(os.path.join(self.working_dir, cmd_file), 'w') as a_file:
          a_file.write(child_job.get_command())

      cmd_file = "command/%s" % job.job_name()
      with open(os.path.join(self.working_dir, cmd_file), 'w') as a_file:
        a_file.write("\n".join(["command/%s" % child_job.job_name() for child_job in job.jobs]))
        a_file.write("\n")

      # '1-N' tasks, optionally capped to '%max' simultaneously running.
      array = '1-%d' % len(job.jobs)
      if job.max_active_tasks:
        array += '%%%d' % job.max_active_tasks
      # NOTE(review): newer Slurm renamed --workdir to --chdir; confirm the
      # cluster's version still accepts 'workdir'.
      params = {
          'workdir': self.working_dir,
          'ntasks': 1,
          'export': "CMD_FILE='%s'" % cmd_file,
          'array': array,
          'mem': job.mem,
          'cpus-per-task': job.cpu,
          'job-name': job.job_name(),
          'output': '"output/%s-%%a"' % job.job_name(),
      }

      # e.g. "afterok:123:124,after:125"
      params['dependency'] = ",".join([
          "%s:%s" % (key, ":".join([str(j.job_id()) for j in value])) for key,
          value in job.dependency.items()
      ])

      if job.constraint is not None:
        params['constraint'] = '"[%s]"' % job.constraint

      # Options with value None are emitted as bare flags (--hold).
      param_str = " ".join([
          '--%s=%s' % (key, value) if value is not None else '--%s' % key for key,
          value in params.items()
      ])

      cmd = 'sbatch %s %s' % (param_str, os.path.join(self.working_dir, 'run_array.sbatch'))

    else:
      cmd_file = "command/%s" % job.job_name()
      with open(os.path.join(self.working_dir, cmd_file), 'w') as a_file:
        a_file.write(job.get_command())

      params = {
          'workdir': self.working_dir,
          'ntasks': 1,
          'export': "CMD_FILE='%s'" % cmd_file,
          'mem': job.mem,
          'cpus-per-task': job.cpu,
          'job-name': job.job_name(),
          'output': 'output/%s' % job.job_name(),
      }
      if job.is_trigger():
        # Trigger jobs are submitted held; start_job() releases them later.
        params['hold'] = None
      else:
        params['dependency'] = ",".join([
            "%s:%s" % (key, ":".join([str(j.job_id()) for j in value])) for key,
            value in job.dependency.items()
        ])

      if job.nodelist is not None:
        params['nodelist'] = job.nodelist

      if job.constraint is not None:
        params['constraint'] = '"[%s]"' % job.constraint

      param_str = " ".join([
          '--%s=%s' % (key, value) if value is not None else '--%s' % key for key,
          value in params.items()
      ])

      cmd = 'sbatch %s %s' % (param_str, os.path.join(self.working_dir, 'run.sbatch'))

    if deploy_only:
      return
    job_id = self.exec_sbatch_(cmd)
    job.set_job_id(job_id)
    self._logger.info("Added  %s[%s]" % (job.job_name(), job.job_id()))
    return job_id

  def add_jobs_impl_(self, job, jobs):
    """Submits job after recursively submitting all of its dependencies.

    Already-submitted jobs (job_id set) are skipped, so shared dependencies
    are submitted only once.
    """
    if job.job_id() is not None:
      return
    assert job in jobs
    for dependency_jobs in job.dependency.values():
      for dependency_job in dependency_jobs:
        self.add_jobs_impl_(dependency_job, jobs)
    self.add_job_(job)

  def get_prepared_job_info_file(self):
    """Path of the JSON file recording submitted job ids/names."""
    return os.path.join(self.working_dir, 'prepared_jobs.json')

  def save_prepared_job_info(self, jobs):
    """Writes [{'JOBID': ..., 'NAME': ...}, ...] for jobs to the info file."""
    with open(self.get_prepared_job_info_file(), 'w') as a_file:
      a_file.write(
          json.dumps([{
              'JOBID': job.job_id(), 'NAME': job.job_name()
          } for job in jobs], indent=2))

  def load_prepared_job_info(self):
    """Loads the list written by save_prepared_job_info.

    Fixed: the original called ``load_json_file``, which is defined nowhere
    in this module (NameError at runtime); read the file with json directly.
    """
    with open(self.get_prepared_job_info_file()) as a_file:
      return json.load(a_file)

  def deploy_(self):
    """Creates working_dir, copies the sbatch wrapper scripts into it and
    creates world-writable command/ and output/ subdirectories.

    Raises:
      OSError: if working_dir already exists (os.makedirs).
      AssertionError: if any copy/chmod shell command fails.
    """
    os.makedirs(self.working_dir)
    # NOTE(review): source path is relative to the current working directory
    # ('coin/tool/slurm/files') -- confirm callers run from the repo root.
    for src in ['run.sbatch', 'run', 'run_array', 'run_array.sbatch']:
      dst = os.path.join(self.working_dir, src)
      deploy_cmd = 'cp %s %s' % (os.path.join('coin/tool/slurm/files', src), dst)
      assert subprocess.call(deploy_cmd, shell=True) == 0, 'slurm deploy failed.'
      chmod_cmd = 'chmod a+x %s' % dst
      assert subprocess.call(chmod_cmd, shell=True) == 0, 'slurm deploy failed.'
    os.mkdir(os.path.join(self.working_dir, 'command'))
    os.mkdir(os.path.join(self.working_dir, 'output'))
    for dir_path in [
        self.working_dir,
        os.path.join(self.working_dir, 'command'),
        os.path.join(self.working_dir, 'output')
    ]:
      chmod_cmd = 'chmod a+xw %s' % dir_path
      assert subprocess.call(chmod_cmd, shell=True) == 0, 'slurm deploy failed.'

  def prepare_jobs(self, jobs, deploy_only=False):
    """Deploys the working dir, submits jobs and records their ids."""
    self._logger.info("Preparing %d jobs..." % len(jobs))
    self.deploy_()
    self.add_jobs_(jobs, deploy_only)
    self.save_prepared_job_info(jobs)
    self._logger.info("Working_dir: %s" % self.working_dir)

  def add_jobs_(self, jobs, deploy_only=False):
    """Submits jobs; with deploy_only, only writes their command files."""
    if deploy_only:
      for job in jobs:
        self.add_job_(job, deploy_only)
    else:
      # Dependency-first submission so --dependency specs have real ids.
      for job in jobs:
        self.add_jobs_impl_(job, jobs)

class SlurmJob(object):
  """One Slurm batch job: a bash command plus scheduling metadata.

  The effective job name is "<prefix>-<name>".  The stored command is a
  complete bash script: shebang, a hostname banner (so the output records
  where the job ran), then the user's command.
  """

  def __init__(self,
               name,
               command,
               mem=None,
               cpu=None,
               trigger_job=False,
               nodelist=None,
               constraint=None,
               slurm_job_name_prefix=None):
    self.slurm_job_name_prefix_ = slurm_job_name_prefix or FLAGS.slurm_job_name_prefix
    self.set_name(name)
    # Wrap the user command in a script that first logs the execution host.
    script_header = "#!/bin/bash\nprintf '[hostname:%s]\\n' $(hostname --long)\n"
    self.command = script_header + command
    self.cpu = 1 if cpu is None else int(cpu)
    self.mem = 1024 if mem is None else int(mem)
    self.nodelist = nodelist
    self.set_constraint(constraint)

    self.job_id_ = None  # assigned exactly once via set_job_id()
    self.trigger_job = trigger_job
    self.dependent_job = False
    self.dependency = {}  # dependency type -> list of SlurmJob

  def is_trigger(self):
    """True if this job is meant to be submitted held and released later."""
    return self.trigger_job

  def set_job_id(self, job_id):
    """Records the id assigned by sbatch; may only be called once."""
    assert self.job_id_ is None
    self.job_id_ = job_id

  def job_id(self):
    """Returns the Slurm job id, or None if not yet submitted."""
    return self.job_id_

  def job_name(self):
    """Returns the prefixed job name."""
    return self.name

  def set_dependency(self, dependency_type, jobs):
    """Makes this job depend on the given job(s); returns self for chaining.

    A trigger job cannot also be dependent (asserted).
    """
    assert not self.trigger_job, self
    if not isinstance(jobs, list):
      jobs = [jobs]
    for dependency_job in jobs:
      assert isinstance(dependency_job, SlurmJob), (self, dependency_job)
    self.dependency[dependency_type] = list(set(jobs))  # de-duplicate
    self.dependent_job = True
    return self

  def set_constraint(self, constraint):
    """Sets the node-feature constraint (None means unconstrained)."""
    self.constraint = constraint

  def set_afterok(self, jobs):
    """Dependency: run only after the given jobs finish successfully."""
    self.set_dependency('afterok', jobs)

  def set_after(self, jobs):
    """Dependency: run only after the given jobs have been started."""
    self.set_dependency('after', jobs)

  def __str__(self):
    return "{} '{}' {}".format(self.name, self.command, self.job_id())

  def get_command(self):
    """Returns the full bash script this job executes."""
    return self.command

  def set_name(self, name):
    """Stores the name with the configured prefix prepended."""
    self.name = '%s-%s' % (self.slurm_job_name_prefix_, name)


class SlurmArrayJob(SlurmJob):
  """A Slurm array job wrapping a list of SlurmJobs as its array tasks.

  The manager writes each child's command to its own file and submits a
  single sbatch --array job covering them all.

  Args:
    name: array job name (also passed as the nominal command; the children
      carry the real commands).
    jobs: list of SlurmJob children, one per array task.
    cpu, mem: per-task resources.
    nodelist: restrict scheduling to these nodes.
    max_active_tasks: cap on simultaneously running tasks ('%' in --array).
    constraint: optional node-feature constraint (new, backward-compatible:
      previously only settable afterwards via set_constraint()).
    slurm_job_name_prefix: optional prefix override, as in SlurmJob.
  """

  def __init__(self, name, jobs, cpu=None, mem=None, nodelist=None, max_active_tasks=None,
               constraint=None, slurm_job_name_prefix=None):
    self.jobs = jobs
    self.max_active_tasks = max_active_tasks
    # Keyword arguments avoid the positional mem/cpu swap hazard (SlurmJob
    # declares mem before cpu, this class declares cpu before mem).
    SlurmJob.__init__(self, name, name, mem=mem, cpu=cpu, trigger_job=False,
                      nodelist=nodelist, constraint=constraint,
                      slurm_job_name_prefix=slurm_job_name_prefix)

  def __str__(self):
    return "%s '%s' %s" % (self.name, 'array_job', self.job_id())
