from absl import flags

import os
import datetime
import math
import subprocess
import signal
import tempfile
from concurrent.futures import (ProcessPoolExecutor, as_completed)


def chunks(args, n):
  """Yield successive slices of ``args``, each of length at most ``n``."""
  for start in range(0, len(args), n):
    end = start + n
    yield args[start:end]


def get_job_name_flag(prefix, jobname):
  """Build the ``--job-name`` argument for srun.

  Returns the empty string when ``prefix`` is empty.  With a prefix but an
  empty ``jobname`` the whole prefix is used; otherwise the name is the
  first two characters of the prefix joined to ``jobname`` by an underscore.
  """
  if prefix == "":
    return ""
  if jobname == "":
    return f"--job-name {prefix}"
  return f"--job-name {prefix[:2]}_{jobname}"


# Register the slurm-related absl flags once per process.  The hasattr guards
# make this module safe to import multiple times: absl raises
# DuplicateFlagError when the same flag name is defined twice.
if not hasattr(flags.FLAGS, "nodelist"):
  # Forwarded to '#SBATCH --nodelist=' in batch_slurm.
  flags.DEFINE_string('nodelist', None, '')

if not hasattr(flags.FLAGS, "partition"):
  # Fallback partition used when batch_slurm's partition_name arg is None.
  flags.DEFINE_string('partition', None, '')

if not hasattr(flags.FLAGS, "hint"):
  # Forwarded to '#SBATCH --hint=' in batch_slurm.
  flags.DEFINE_string('hint', None, '')

if not hasattr(flags.FLAGS, "exclude"):
  # Forwarded to '#SBATCH --exclude=' in batch_slurm.
  flags.DEFINE_string('exclude', None, '')

if not hasattr(flags.FLAGS, "time_measure"):
  # When true, batch_slurm wraps the final dependency srun in `time`.
  flags.DEFINE_boolean('time_measure', None, '')

if not hasattr(flags.FLAGS, "split_cpu"):
  # Overrides batch_slurm's split_cpu argument when truthy (non-zero).
  flags.DEFINE_integer('split_cpu', 100, '')


def process_pool_exec(nparallel, cmdargs):
  """Run ``cmdargs`` in worker processes with at most ``nparallel`` in flight.

  Each element of ``cmdargs`` is a tuple ``(callable, arg1, ...)`` unpacked
  into ``executor.submit``; ``None`` elements are skipped.  Results are
  discarded, but ``future.result()`` is still called so worker exceptions
  propagate to the caller.  KeyboardInterrupt aborts quietly (best-effort).
  """
  futures = {}
  try:
    with ProcessPoolExecutor(max_workers=nparallel) as executor:
      arg_iter = iter(cmdargs)
      while True:
        # Top up the in-flight window: keep submitting from the shared
        # iterator until nparallel futures are pending or it is exhausted.
        for arg in arg_iter:
          if arg is None:
            continue
          future = executor.submit(*arg)
          futures[future] = True
          if len(futures) >= nparallel:
            break
        # Nothing pending and nothing left to submit: all done.
        if len(futures) == 0:
          break

        # Wait for exactly one future to finish, then loop back to refill
        # the window; the break stops as_completed from draining them all.
        for future in as_completed(futures):
          future.result()
          del futures[future]
          del future
          break
  except KeyboardInterrupt:
    pass


# Slurm job ids submitted by batch_slurm and not yet cancelled/cleared.
slurm_jobs_sent = []


def cleanup_slurm_jobs():
  """Cancel every slurm job recorded in ``slurm_jobs_sent`` via ``scancel``.

  Clears the module-level list afterwards.  No-op when no jobs were sent,
  which avoids invoking ``scancel`` with no job ids (an error).
  """
  global slurm_jobs_sent
  if not slurm_jobs_sent:
    return
  print("Cleanup sent sjobs!")
  job_ids = ' '.join(str(job_id) for job_id in slurm_jobs_sent)
  scancel_cmd = f"scancel {job_ids}"
  print(scancel_cmd)
  subprocess.call(scancel_cmd, shell=True)
  slurm_jobs_sent = []


def batch_slurm(cmds,
                temp_dir,
                cpu,
                cpu_per_job=1,
                prefix="",
                jobnames=None,
                mem_per_cpu_gb=None,
                split_cpu=100,
                split_cpu_multiplier=2,
                time_limit_mins=None,
                partition_name=None,
                chdir=None,
                use_flags=True):
  """Run shell commands on a slurm cluster via generated sbatch scripts.

  Writes one or more bash scripts (one srun line per command) into
  temp_dir, submits them with sbatch, then blocks on a dependency srun
  until all submitted jobs finish and prints a per-job sacct state summary.
  Submitted job ids are appended to the module-level slurm_jobs_sent list
  and cancelled via cleanup_slurm_jobs() on exit.

  @param cmds: list of shell command strings; None entries are skipped
  @param temp_dir: directory for generated scripts, env dump and slurm logs
  @param cpu: total number of CPUs to spread the commands over
  @param cpu_per_job: CPUs requested per srun step
  @param prefix: job-name prefix, see get_job_name_flag
  @param jobnames: optional per-command job names (parallel to cmds)
  @param mem_per_cpu_gb: --mem-per-cpu in GB (falls back to flag, then 2)
  @param split_cpu: limit each SBATCH to use at most split_cpu
    e.g. cpu=800 split_cpu=100 will run 8 SBATCHs for your cmds
    default=100
  @param split_cpu_multiplier: default=4 in each SBATCH, limit the total number of cpu to be less
    than split_cpu_multiplier * cpu, if a job use 600 CPUs and SBATCH has 100, it will be break
    in to two sequential SBATCH with 400 and 200 to prevent too much command in one sbatch file
    NOTE(review): the signature default is 2, not 4 as stated above — confirm.
  @param time_limit_mins must be int
  @param partition_name: slurm partition; falls back to the --partition flag
  @param chdir: working directory for the batch script (#SBATCH --chdir)
  @param use_flags: when True, read absl flags for overrides
  """
  global slurm_jobs_sent
  # Truthy check: a --split_cpu flag value of 0 would be ignored.
  if use_flags and flags.FLAGS.split_cpu:
    split_cpu = flags.FLAGS.split_cpu
  if cpu > split_cpu:
    # CPU budget exceeds one sbatch's cap: split cmds into nbreaks chunks
    # and recurse, running the chunks concurrently in a process pool.
    nbreaks = int(math.ceil(cpu / split_cpu))
    assert nbreaks >= 2
    chunk_size = int(math.ceil(len(cmds) / nbreaks))
    cmdschunks = [cmdschunk for cmdschunk in chunks(cmds, chunk_size)]
    # NOTE(review): the positional args map split_cpu onto the recursive
    # call's `cpu` parameter, and the full `jobnames` list is passed to
    # every chunk even though each chunk holds only a slice of cmds —
    # confirm both are intended.
    cmdargs = [(batch_slurm, cmdschunk, temp_dir, split_cpu, cpu_per_job, prefix, jobnames,
                mem_per_cpu_gb, split_cpu, split_cpu_multiplier, time_limit_mins, partition_name)
               for cmdschunk in cmdschunks]
    process_pool_exec(nbreaks, cmdargs)
    return

  # Resolve memory per cpu: explicit arg wins, then flag (if defined), then 2.
  if use_flags and hasattr(flags.FLAGS, "mem_per_cpu_gb"):
    mem_per_cpu_gb = mem_per_cpu_gb or flags.FLAGS.mem_per_cpu_gb
  else:
    mem_per_cpu_gb = mem_per_cpu_gb or 2

  if len(cmds) > split_cpu_multiplier * cpu:
    # Too many commands for one sbatch round: halve and run sequentially.
    halfidx = int(len(cmds) / 2)
    batch_slurm(cmds[:halfidx], temp_dir, cpu, cpu_per_job, prefix, jobnames, mem_per_cpu_gb,
                split_cpu, split_cpu_multiplier, time_limit_mins, partition_name)
    batch_slurm(cmds[halfidx:], temp_dir, cpu, cpu_per_job, prefix, jobnames, mem_per_cpu_gb,
                split_cpu, split_cpu_multiplier, time_limit_mins, partition_name)
    return
  if len(cmds) == 0:
    return
  if use_flags and hasattr(flags.FLAGS, "debug") and flags.FLAGS.debug:
    # Debug mode: only run the first command.
    cmds = cmds[:1]
  if jobnames is None:
    jobnames = ["" for cmd in cmds]
  # Cap each generated sbatch file at ~5000 srun lines; the CPU budget is
  # then split evenly across the files.
  sbatch_file_count = math.ceil(len(cmds) / 5000)
  jobs_per_file = math.ceil(len(cmds) / sbatch_file_count)
  cpu /= math.ceil(sbatch_file_count)  # note: cpu becomes a float here

  job_ids = []
  # Dump the caller's shell environment; referenced (commented out) in the
  # generated script below.
  envfile = f"{temp_dir}/env_export.txt"
  os.system(f"export > {envfile}")
  for cnt, jobsjns in enumerate(chunks(list(zip(cmds, jobnames)), jobs_per_file)):
    log_filepath = f"{temp_dir}/slurm-%d.txt"
    jobs, jns = zip(*jobsjns)
    # Unique, human-readable script filename per chunk.
    job_file_fd, job_file = tempfile.mkstemp(
      suffix='.sh',
      prefix=f'{prefix}_{cnt}_{datetime.datetime.now().strftime("%Y%m%d_%H%M%S_%f")}_',
      dir=temp_dir)
    os.close(job_file_fd)
    nparallel = max(min(int(cpu / cpu_per_job), len(jobs)), 1)
    with open(job_file, 'wt') as fh:
      fh.writelines("#!/bin/bash\n")
      fh.writelines(f"#SBATCH -o {temp_dir}/slurm-%j.txt\n")
      if use_flags and flags.FLAGS.nodelist is not None:
        fh.writelines(f"#SBATCH --nodelist={flags.FLAGS.nodelist}\n")
      if partition_name is not None:
        fh.writelines(f"#SBATCH --partition={partition_name}\n")
      elif use_flags and flags.FLAGS.partition is not None:
        fh.writelines(f"#SBATCH --partition={flags.FLAGS.partition}\n")
      if use_flags and flags.FLAGS.hint is not None:
        fh.writelines(f"#SBATCH --hint={flags.FLAGS.hint}\n")
      if use_flags and flags.FLAGS.exclude is not None:
        fh.writelines(f"#SBATCH --exclude={flags.FLAGS.exclude}\n")
      if time_limit_mins is not None:
        fh.writelines(f"#SBATCH --time={time_limit_mins}\n")
      if use_flags and prefix.startswith('analyze'):
        # NOTE(review): relies on an `analyzer_mem` flag defined elsewhere
        # in the project — not registered in this file.
        fh.writelines(f"#SBATCH --mem-per-cpu={flags.FLAGS.analyzer_mem}gb\n")
      if chdir is not None:
        fh.writelines(f"#SBATCH --chdir={chdir}\n")
      else:
        # otherwise memory exceeds slightly, in feed reader with many symbols, for example, ftx swap case.
        # NOTE(review): this `else` pairs with the chdir check, so an
        # 'analyze' prefix with chdir=None emits two --mem-per-cpu lines,
        # while chdir set means no default --mem-per-cpu at all — confirm.
        fh.writelines(f"#SBATCH --mem-per-cpu={mem_per_cpu_gb}gb\n")
      fh.writelines(f"# source {envfile}\n")
      fh.writelines(f"set -o pipefail\n")
      cnt = 0  # NOTE(review): shadows the enumerate counter above (unused afterwards)
      for i, (cmd, jobname) in enumerate(zip(jobs, jns)):
        if cmd is not None and cmd.startswith("export "):
          # Rewrite a leading "export VAR=...;" into srun's --export option.
          cmd = cmd.replace("export ", "--export=ALL,", 1).replace(';', ' ', 1)
        if cmd is not None:
          cnt += 1
          fh.writelines(
              f"srun -v -N 1 -n 1 -c {cpu_per_job} --hint=nomultithread {get_job_name_flag(prefix, jobname)} --exclusive --cpu-bind=cores"
              f" {cmd} &\n")
          # NOTE(review): `cnt % 1 == 0` is always true, so this sleeps
          # after every srun line — confirm the intended throttle interval.
          if cnt % 1 == 0:
            fh.writelines("sleep 0.1\n")
      fh.writelines("wait\n")
      fh.writelines("echo 0 &> /dev/null")
    cmd_str = f"sbatch --hint=nomultithread --parsable -n {nparallel} -c {cpu_per_job} {job_file}"
    proc = subprocess.Popen(cmd_str.split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (slurm_job_id, _) = proc.communicate()
    # --parsable makes sbatch print only the job id on stdout.
    slurm_job_id = int(slurm_job_id)
    print(cmd_str)
    print(log_filepath % slurm_job_id)
    print(f'sim job id {slurm_job_id}')
    job_ids.append(slurm_job_id)
  job_ids_str = ':'.join([str(job_id) for job_id in job_ids])
  slurm_jobs_sent.extend(job_ids)
  if partition_name is not None:
    extra_echo_arg = f"--partition {partition_name}"
  elif use_flags and flags.FLAGS.partition is not None:
    extra_echo_arg = f"--partition {flags.FLAGS.partition}"
  else:
    extra_echo_arg = ""
  # Block until every submitted job finishes by srun-ing a trivial echo with
  # an afterany dependency, then print a per-job state summary via sacct.
  srun_cmd = f"srun --dependency=afterany:{job_ids_str} {extra_echo_arg} -n 1 echo DONE &> /dev/null && wait\n"
  if use_flags and flags.FLAGS.time_measure:
    srun_cmd = f"time {srun_cmd}\n"
  for slurm_job_id in slurm_jobs_sent:
    srun_cmd += f"""sacct -n -j {slurm_job_id} -o state%20 | sort | uniq -c\n"""
  try:
    subprocess.call(srun_cmd, shell=True)
  except KeyboardInterrupt:
    pass
  finally:
    # Cancel anything still pending/running (also clears slurm_jobs_sent).
    cleanup_slurm_jobs()


def batch_local(
    cmds,
    cpu,
    temp_dir=None,
    cpus=None,
    cpu_per_job=1,
    prefix="",
    jobnames=None,
    mem_per_cpu_gb=None):
  """Run shell commands on the local machine, optionally in parallel.

  Mirrors batch_slurm's calling convention; ``temp_dir``, ``cpus``,
  ``prefix``, ``jobnames`` and ``mem_per_cpu_gb`` are currently unused here
  and kept only for call-site compatibility.

  @param cmds: list of shell command strings; None entries are skipped
  @param cpu: total CPU budget used to size the worker pool
  @param cpu_per_job: CPUs assumed per command when sizing the pool
  """
  if hasattr(flags.FLAGS, "debug") and flags.FLAGS.debug:
    # Debug mode: only run the first command.
    cmds = cmds[:1]
  nparallel = max(min(int(cpu / cpu_per_job), len(cmds)), 1)

  if nparallel == 1:
    # Serial path: run each command in-process, skipping placeholders.
    for cmd in cmds:
      if cmd is None:
        continue
      print(cmd)
      os.system(cmd)
  else:
    # Parallel path. Filter None commands here: process_pool_exec only
    # skips submission args that are None as a whole, so an unfiltered
    # (os.system, None) tuple would raise TypeError inside the worker.
    process_pool_exec(
        nparallel,
        [(os.system, cmd) for cmd in cmds if cmd is not None])
