# Copyright (c) 2019 Presto Labs Pte. Ltd.
# Author: yuxuan

import datetime
import logging
import os
import sys
import traceback
import subprocess
from concurrent import futures
from concurrent.futures import ProcessPoolExecutor

from absl import flags

from coin.base.mail_util import send_mail
from coin.tool.slurm.job_group import JobGroup
from coin.tool.slurm.slurm import SlurmJobManager, SlurmJob

FLAGS = flags.FLAGS

# Deployment / environment locations used to build the remote shell commands.
flags.DEFINE_string('deployed_path', '/remote/iosg/home-2/yuxuan/workspace/coin', '')
flags.DEFINE_string('venv_dir', '/remote/iosg/home-2/yuxuan/miniconda3', '')
flags.DEFINE_string('slurm_working_dir', '/remote/iosg/home-2/yuxuan/coin/slurm/working_dir', '')
# Recipient of the submission / completion / failure notification emails.
flags.DEFINE_string('report_email', 'yuxuan@prestolabs.io', '')
flags.DEFINE_string('config_file', '', '')
# Comma-separated node names to exclude from scheduling (see run_on_slurm).
flags.DEFINE_string('filter_nodes', '', '')
flags.DEFINE_integer('max_cpu', 300, 'Positive values will throttle max active jobs.')
# When True, run_jobs uses a local ProcessPoolExecutor instead of slurm.
flags.DEFINE_boolean('run_local', False, '')
# API overrides copied into every sim param dict; api_override supersedes the
# per-exchange okex/binance overrides (see _update_params).
flags.DEFINE_string('api_override', '', '')
flags.DEFINE_string('okex_api_override', '', '')
flags.DEFINE_string('binance_api_override', '', '')
flags.DEFINE_string('feed_machine', '', '')
flags.DEFINE_boolean('skip_run_if_output_exists', False, '')
flags.DEFINE_string('trading_date', '', 'YYYYMMDD or YYYYMMDD-YYYYMMDD')
# NOTE(review): the flags below are declared here but not read in this file;
# presumably consumed by the sim modules launched via pyrunner — confirm
# before removing any of them.
flags.DEFINE_boolean('run_from_csv', False, '')
flags.DEFINE_boolean('fastfeed', True, '')
flags.DEFINE_string('output_dir', '/remote/iosg/home-2/yuxuan/coin/sim_result', '')
flags.DEFINE_string('csv_feed_root', '/remote/iosg/home-2/yuxuan/coin/feed', '')
flags.DEFINE_string('base', '', '')


def enumerate_dates(start_date, end_date):
  """Yield every date from start_date through end_date, inclusive."""
  one_day = datetime.timedelta(days=1)
  current = start_date
  while current <= end_date:
    yield current
    current += one_day


def _get_trading_dates(date_str):
  assert ',' not in date_str
  trading_dates = []
  if '-' in date_str:
    start_date, end_date = date_str.split('-')
    trading_dates = [
        d for d in enumerate_dates(datetime.datetime.strptime(start_date, '%Y%m%d'),
                                   datetime.datetime.strptime(end_date, '%Y%m%d'))
    ]
    trading_dates = [int(d.strftime('%Y%m%d')) for d in trading_dates]
  else:
    trading_dates = [int(FLAGS.trading_date)]
  return trading_dates


def get_trading_dates(date_str):
  """Expand a comma-separated list of date tokens into a flat list of int dates."""
  result = []
  for token in date_str.split(','):
    result.extend(_get_trading_dates(token.strip()))
  return result


def get_run_id():
  """Return a millisecond-resolution epoch timestamp used as a run id."""
  now = datetime.datetime.now()
  return int(now.timestamp() * 1000)


def send_email(name, subject, content):
  """Send a report email from the fixed sim sender to FLAGS.report_email."""
  sender = 'coin-sim@joomo.io'
  send_mail(name, sender, FLAGS.report_email, subject, content)


def get_slurm_working_dir(prefix):
  """Return a fresh '<prefix>_<run_id>' path under FLAGS.slurm_working_dir."""
  dir_name = '%s_%s' % (prefix, get_run_id())
  return os.path.join(FLAGS.slurm_working_dir, dir_name)


def params_to_str(param):
  """Render a flag dict as shell arguments: --key="value" joined by spaces."""
  parts = []
  for key, val in param.items():
    parts.append('--%s="%s"' % (key, val))
  return ' '.join(parts)


def get_a_command(module, param, local=False):
  """Build the shell command that runs `module` through ./pyrunner.

  Args:
    module: dotted python module path, e.g. 'coin.sim.main'.
    param: dict of flag name -> value, rendered via params_to_str.
    local: when True, skip the cd/venv-activation preamble (run in place).

  Returns:
    A single '&&'-joined shell command string.
  """
  file_path = '%s.py' % '/'.join(module.split('.'))
  cmd = []
  cmd.append('env -i')
  cmd.append('export TZ=UTC')
  if not local:
    deploy_working_dir = os.path.join(FLAGS.deployed_path, 'python')
    cmd.append('cd %s' % deploy_working_dir)
    cmd.append('source %s/bin/activate' % FLAGS.venv_dir)
    cmd.append('conda activate %s/envs/coin2_motion_env' % FLAGS.venv_dir)
  cmd.append(' '.join(['./pyrunner', file_path, params_to_str(param)]))
  if not local:
    # Bug fix: 'deactivate' used to be appended unconditionally. In local mode
    # no venv was activated, so the trailing 'deactivate' fails and the '&&'
    # chain exits non-zero, making _run_local's check_call report a spurious
    # failure even when the sim itself succeeded.
    cmd.append('deactivate')
  return ' && '.join(cmd)


def get_email_command(name, subject, content):
  """Build the shell command that sends `content` via coin.tool.mailer.

  The body is fed to the mailer on stdin through a <<EOF heredoc.
  """
  deploy_working_dir = os.path.join(FLAGS.deployed_path, 'python')
  heredoc = """ <<EOF
%s
EOF""" % content
  mailer_cmd = ' '.join([
      'python -m coin.tool.mailer',
      '--name="%s"' % name,
      '--mailfrom="coin-sim@joomo.io"',
      '--mailto="%s"' % FLAGS.report_email,
      '--subject="%s"' % subject,
      heredoc,
  ])
  steps = [
      'env -i',
      'cd %s' % deploy_working_dir,
      'source %s/bin/activate' % FLAGS.venv_dir,
      'conda activate %s/envs/coin_env' % FLAGS.venv_dir,
      mailer_cmd,
  ]
  return ' && '.join(steps)


def get_command():
  """Return the command line this process was launched with, space-joined."""
  return ' '.join(arg for arg in sys.argv)


def get_email_content(slurm_working_dir=None):
  """Compose the standard notification email body.

  Args:
    slurm_working_dir: when given, appended as a 'working dir' section.
  """
  sections = ['Thank you!', '\ncommand:\n%s' % get_command()]
  if slurm_working_dir:
    sections.append('\nworking dir:\n%s' % slurm_working_dir)
  return '\n'.join(sections)


def get_srun_command(cmds):
  """Wrap each command in a backgrounded single-task srun step, then 'wait'."""
  srun_lines = [
      "srun --exclusive -N1 -n1 --mem-per-cpu=0 bash -c '%s' &" % cmd
      for cmd in cmds
  ]
  return '\n'.join(srun_lines + ['wait'])


def get_sbatch_command(slurm_working_dir, job, cpu, hold=False, other_params=None):
  """Build the sbatch invocation for `job`'s generated script.

  Args:
    slurm_working_dir: directory holding the command/ and output/ subdirs.
    job: object exposing job_name().
    cpu: value for --ntasks.
    hold: when True, submit in held state (bare --hold flag).
    other_params: extra sbatch flags to merge in; a value of None means the
      flag is emitted without '=value'.
  """
  job_name = job.job_name()
  params = {
      'chdir': slurm_working_dir,
      'ntasks': cpu,
      'job-name': job_name,
      'output': 'output/%s' % job_name,
  }
  if hold:
    params['hold'] = None
  params.update(other_params or {})
  pieces = []
  for key, val in params.items():
    pieces.append('--%s' % key if val is None else '--%s=%s' % (key, val))
  script = os.path.join(slurm_working_dir, 'command', job_name)
  return 'sbatch %s %s' % (' '.join(pieces), script)


def sbatch(cmd):
  """Run an sbatch command and return the submitted job id.

  Relies on sbatch printing 'Submitted batch job <id>': the id is the last
  space-separated token of the output.
  """
  raw = subprocess.check_output([cmd], shell=True)
  text = raw.decode('utf-8').strip()
  return text.split(' ')[-1]


def release(job_id):
  """Release a held slurm job so the scheduler may start it."""
  release_cmd = 'scontrol release %s' % job_id
  subprocess.check_call([release_cmd], shell=True)


def update_config(config):
  """Fill in default sim settings; mutates `config` in place and returns it."""
  config.setdefault('use_bitmex_cached_info', 1)
  return config


def run_sbatch_on_slurm(job_name, module, params):
  """Submit all `params` as a single sbatch job of parallel srun steps.

  One sim command per param dict is packed into one held sbatch job; an email
  job is chained after it with an afterok dependency, then the sim job is
  released. Submission errors are reported by email instead of raised.

  Args:
    job_name: prefix for the working dir and slurm job names.
    module: dotted python module run through ./pyrunner.
    params: list of dicts, one flag-dict per sim run.
  """
  logger = logging.getLogger(__name__)
  assert job_name, 'job_name is empty'
  try:
    logger.info('Submitting %s jobs' % len(params))
    slurm_working_dir = get_slurm_working_dir(job_name)
    logger.info('slurm_working_dir %s' % slurm_working_dir)
    job_manager = SlurmJobManager(slurm_working_dir)
    # One srun step per param; all steps share a single sbatch allocation.
    sim_cmds = [get_a_command(module, update_config(param)) for param in params]
    sim_job = SlurmJob('%s_all' % job_name, get_srun_command(sim_cmds))
    finish_job = SlurmJob(
        '%s_email' % job_name,
        get_email_command('Slurm Job %s' % job_name,
                          '%s finished' % job_name,
                          get_email_content(slurm_working_dir)))
    # deploy_only: write the job scripts; submission is done via sbatch below.
    job_manager.prepare_jobs([sim_job, finish_job], deploy_only=True)
    # Submit held (hold=True) so the email job's afterok dependency can be
    # attached before the sim job is allowed to start.
    sbatch_cmd = get_sbatch_command(slurm_working_dir,
                                    sim_job,
                                    min(len(params), FLAGS.max_cpu),
                                    True)
    sim_job_id = sbatch(sbatch_cmd)
    dependency = {'dependency': 'afterok:%s' % sim_job_id}
    sbatch_cmd = get_sbatch_command(slurm_working_dir, finish_job, 1, other_params=dependency)
    email_job_id = sbatch(sbatch_cmd)
    print('%s => %s' % (sim_job_id, email_job_id))
    # Both jobs queued; now let the held sim job actually run.
    release(sim_job_id)
    send_email('Slurm Job %s' % job_name, '%s submitted' % job_name, get_email_content())
  except Exception as e:
    # Best-effort boundary: report submission failure by email, don't raise.
    print(e)
    traceback.print_exc()
    send_email('Slurm Job %s' % job_name, '%s submission failed' % job_name, traceback.format_exc())


def run_on_slurm(job_name, module, params, mem=None, cpu=None):
  """Submit one slurm job per param dict, grouped under a JobGroup.

  Unlike run_sbatch_on_slurm, each sim run becomes its own SlurmJob. A
  trigger job gates the group's start and an email job fires after the
  group's end trigger succeeds. Errors are reported by email, not raised.

  Args:
    job_name: prefix for the working dir and per-run job names.
    module: dotted python module run through ./pyrunner.
    params: list of dicts, one flag-dict per sim run.
    mem: optional memory request forwarded to each SlurmJob.
    cpu: optional cpu request forwarded to each SlurmJob.
  """
  logger = logging.getLogger(__name__)
  assert job_name, 'job_name is empty'
  try:
    logger.info('Submitting %s jobs' % len(params))
    slurm_working_dir = get_slurm_working_dir(job_name)
    logger.info('slurm_working_dir %s' % slurm_working_dir)
    job_manager = SlurmJobManager(slurm_working_dir)
    all_nodes = job_manager.get_available_nodes()
    skip_nodes = []
    if FLAGS.filter_nodes:
      skip_nodes = FLAGS.filter_nodes.split(',')
    use_nodes = []
    if skip_nodes:
      use_nodes = [node for node in all_nodes if node not in skip_nodes]
    # NOTE(review): when --filter_nodes is unset, use_nodes stays empty and no
    # constraint is applied below, so jobs may run on any available node.
    print(use_nodes)
    use_nodes_constraint = '|'.join(use_nodes)
    trigger = SlurmJob('trigger', 'echo 0', trigger_job=True)
    sim_jobs = []
    for i, param in enumerate(params):
      param = update_config(param)
      a_job_name = '%s_%s' % (job_name, i)
      sim_jobs.append(SlurmJob(a_job_name, get_a_command(module, param), mem=mem, cpu=cpu))
      if use_nodes:
        sim_jobs[-1].set_constraint(use_nodes_constraint)
    # NOTE(review): [256] is passed to JobGroup verbatim; presumably a batch
    # size — confirm against JobGroup's signature before changing.
    job_group = JobGroup(job_name, sim_jobs, [256], FLAGS.max_cpu)
    job_group.start_trigger.set_afterok(trigger)
    email_job = SlurmJob(
        'email',
        get_email_command('Slurm Job %s' % job_name,
                          '%s finished' % job_name,
                          get_email_content(slurm_working_dir)))
    email_job.set_afterok(job_group.end_trigger)
    all_jobs = job_group.get_all_jobs() + [email_job] + [trigger]
    job_manager.prepare_jobs(all_jobs)
    # Starting the trigger releases the whole dependency chain.
    job_manager.start_job(trigger)
    logger.info('slurm job started')
    send_email('Slurm Job %s' % job_name, '%s submitted' % job_name, get_email_content())
  except Exception as e:
    # Best-effort boundary: report submission failure by email, don't raise.
    print(e)
    traceback.print_exc()
    send_email('Slurm Job %s' % job_name, '%s submission failed' % job_name, traceback.format_exc())


def _run_local(cmd):
  subprocess.check_call([cmd], shell=True)


def run_local_multiprocess(job_name, module, params, cpu=None):
  """Run one local subprocess per param dict in parallel, then email a report.

  Args:
    job_name: label used in the notification emails.
    module: dotted python module run through ./pyrunner.
    params: list of dicts, one flag-dict per run.
    cpu: optional cap on worker processes (further capped by FLAGS.max_cpu).
  """
  logger = logging.getLogger(__name__)
  assert job_name, 'job_name is empty'
  try:
    sim_jobs = []
    for param in params:
      param = update_config(param)
      cmd = get_a_command(module, param, True)
      sim_jobs.append(cmd)
    logger.info('%d jobs' % len(sim_jobs))
    futs = dict()
    with ProcessPoolExecutor(max_workers=min(FLAGS.max_cpu, cpu or FLAGS.max_cpu)) as executor:
      for cmd in sim_jobs:
        fut = executor.submit(_run_local, cmd)
        futs[fut] = cmd
    # Exiting the with-block waits for all workers, so every future is done.
    failed_tasks = []
    for future in futures.as_completed(futs):
      try:
        future.result()
      except Exception as e:
        print(e)
        failed_tasks.append(futs[future])
    if not failed_tasks:
      send_email('Local Job %s' % job_name, '%s finished' % job_name, get_email_content())
    else:
      # Consistency fix: was 'Local job'; use 'Local Job' like the other mails.
      send_email('Local Job %s' % job_name,
                 '%s failed' % job_name,
                 'Failed commands:\n%s' % '\n'.join(failed_tasks))
  except Exception as e:
    print(e)
    traceback.print_exc()
    # Bug fix: this failure email was labeled 'Slurm Job' (copy-paste from the
    # slurm runners) even though this is the local runner.
    send_email('Local Job %s' % job_name, '%s failed' % job_name, traceback.format_exc())


def _update_params(params):
  """Apply command-line override flags to every param dict, in place."""
  if FLAGS.api_override:
    # A global api_override supersedes the per-exchange overrides.
    for param in params:
      param['api_override'] = FLAGS.api_override
      param.pop('okex_api_override', None)
      param.pop('binance_api_override', None)
  else:
    if FLAGS.okex_api_override:
      for param in params:
        param['okex_api_override'] = FLAGS.okex_api_override
    if FLAGS.binance_api_override:
      for param in params:
        param['binance_api_override'] = FLAGS.binance_api_override
  for param in params:
    param['skip_run_if_output_exists'] = FLAGS.skip_run_if_output_exists
  if FLAGS.feed_machine:
    for param in params:
      param['feed_machine'] = FLAGS.feed_machine


def run_jobs(job_name, module, params, cpu=None):
  """Entry point: apply flag overrides, then dispatch locally or to slurm."""
  _update_params(params)
  if not FLAGS.run_local:
    run_sbatch_on_slurm(job_name, module, params)
  else:
    run_local_multiprocess(job_name, module, params, cpu=cpu)
