import subprocess
import json
import optuna
import sys
import os
import shutil
import copy
import tempfile
import socket
import neptune
import neptunecontrib.monitoring.optuna as opt_utils
from coin2.strategy.pta.performance import donggu2
from types import SimpleNamespace

# ---- CLI parsing (module level; exits with usage message on wrong arity) ----
if len(sys.argv) != 8:
  print('./script [exp_name] [coin1_style_config_path] [okex or huobi] [target-symbol]'
        ' YYYYMMDD-YYYYMMDD each_day_duration [use_order_latency_model]')
  sys.exit(1)

exp_name = sys.argv[1]
original_config_path = sys.argv[2]
exchange = sys.argv[3]

target = sys.argv[4]
# coin2 names weekly futures 'WEEK' where coin1 uses 'THIS_WEEK'; map the
# symbol so the per-product override in params_to_config hits the right key.
if 'THIS_WEEK' in target:
  coin2_target = target.replace('THIS_WEEK', 'WEEK')
else:
  coin2_target = target

dates = sys.argv[5]  # 'YYYYMMDD-YYYYMMDD' date range passed through to sim.sh
duration_each_day = sys.argv[6]  # '24H'
use_order_latency_model = bool(int(sys.argv[7]))  # CLI gives '0' or '1'
name = f'{exp_name}-{exchange}-{target}'  # also used as the optuna study name
eod_clear = True  # positions assumed cleared end-of-day => no unrealized scaling
is_usdt = ('USDT' in target)  # NOTE(review): set but not used in this chunk


def get_lm_export_sha():
  """Return the commit oid at HEAD of the sibling ../coin_deploy checkout."""
  import pygit2  # local import: pygit2 only needed for this provenance lookup
  return pygit2.Repository('../coin_deploy').head.target


# ---- Neptune experiment bookkeeping (network side effects at import time) ----
neptune.init('donggu-prestolabs/lm-agg2')
e = neptune.create_experiment(name=exp_name, tags=[exp_name])
# Record enough provenance to reproduce this run later.
e.set_property('target', target)
e.set_property('dates', dates)
e.set_property('exchange', exchange)
e.set_property('original_config', original_config_path)
e.set_property('host', socket.gethostname())
e.set_property('lm_export', get_lm_export_sha())
callback = opt_utils.NeptuneCallback()  # mirrors each optuna trial into neptune

# Baseline coin1-style config; treated as read-only and deep-copied per trial.
with open(original_config_path) as f:
  original_json = json.load(f)


def baseline_params(common, config):  # config: read-only
  """Translate a coin1-style product config into the optuna search-space dict.

  Product-level keys override ``common`` ones; neither input is mutated.
  Used to enqueue the current production config as the baseline trial.
  """
  merged = {**common, **config}
  # filename pattern: linear_alpha_<tag>_<horizon>s.json -> extract <horizon>
  horizon_token = merged['linear_alpha_file'].split('_')[3]
  return {
      'stack': merged['max_pos'] / merged['lot_size'] * 2,
      'threshold': merged['threshold_bps'],
      'lean_per_stack': merged.get('lean_per_stack_bps', 0.0),
      'horizon': int(horizon_token.split('s')[0]),
      'cancel_edge_bps': merged.get('cancel_edge_bps', 3),
  }


def sample_params(trial):
  """Draw one set of strategy hyper-parameters from the optuna trial."""
  stack = trial.suggest_uniform('stack', 4, 16)
  threshold = trial.suggest_uniform('threshold', 2, 7)
  lean = trial.suggest_uniform('lean_per_stack', 0, 5)
  horizon = trial.suggest_categorical('horizon', [2, 3, 5, 10, 30])
  cancel_edge = trial.suggest_uniform('cancel_edge_bps', 2, 5)
  return {'stack': stack,
          'threshold': threshold,
          'lean_per_stack': lean,
          'horizon': horizon,
          'cancel_edge_bps': cancel_edge}


def params_to_config(params):
  """Materialize a single-product simulator config for one trial.

  Deep-copies the baseline config, overrides the target product with the
  sampled ``params``, and writes the result to a temp JSON file whose path
  is returned.  The file is intentionally kept on disk (delete=False): it
  is read later by sim.sh and uploaded as a neptune artifact.
  """
  cfg = copy.deepcopy(original_json)
  prod = cfg['products'][target]

  # Restrict the simulation to the single target product.
  cfg['products'] = {target: prod}

  # Toggle the order-latency model for this product inside the (single)
  # mea entry of og_params.
  mea_key = next(iter(cfg['common']['og_params']))
  cfg['common']['og_params'][mea_key]['product_configs'] = {
      coin2_target: {
          'use_order_latency_model': use_order_latency_model
      }
  }

  p = SimpleNamespace(**params)
  lot = prod['lot_size']
  prod['linear_alpha_file'] = f'linear_alpha_zerothres_{p.horizon}s.json'
  # 'stack' counts lots of capacity; split symmetrically across both sides.
  prod['min_pos'] = -lot * p.stack * 0.5
  prod['max_pos'] = lot * p.stack * 0.5
  prod['maintain_open_pos'] = lot
  prod['threshold_bps'] = p.threshold
  prod['lean_per_stack_bps'] = p.lean_per_stack

  prod['use_passive_post'] = True
  prod['use_cancel_model'] = True
  prod['cancel_edge_bps'] = p.cancel_edge_bps
  prod['cancel_outside_bps'] = 13

  with tempfile.NamedTemporaryFile('w+', suffix='.json', delete=False) as fh:
    json.dump(cfg, fh)
    fh.flush()
  return fh.name


def objective(trial, config_path):
  """Run one simulation via sim.sh and score its fill-weighted PnL series.

  Args:
    trial: the optuna trial (only trial.number is used, for naming/logging).
    config_path: JSON config file produced by params_to_config.

  Returns:
    float score from donggu2() over the per-product-per-day pnl list.

  Raises:
    RuntimeError: if sim.sh exits non-zero (stdout/stderr are dumped first).
  """
  cmds = [
      './cc/appcoin2/experimental/donggu/research/sim.sh',
      name,
      str(trial.number),
      target,
      config_path,
      dates,
      duration_each_day
  ]
  print(f"{target} Trial #{trial.number}: {' '.join(cmds)}")
  # list argv + shell=False: no shell-injection surface from the params
  out = subprocess.run(cmds, shell=False, capture_output=True)
  if out.returncode != 0:
    print(f'Trial #{trial.number} has failed.')
    print('stderr:')
    print(out.stderr.decode('utf-8'))
    print('==========')
    print('stdout:')
    print(out.stdout.decode('utf-8'))
    print('==========')
    raise RuntimeError('sim.sh has failed')

  out = out.stdout
  try:
    reports = list(map(json.loads, out.splitlines()))  # multi dates => 1 json report per line
  except Exception:
    print('error parsing json result from trial stdout: ', out)
    raise

  pnls = []
  assert len(reports) >= 1
  try:
    # First pass: find the settle-currency price at the latest timestamp
    # across all daily reports (used to rescale unrealized pnl below).
    last_price = 0
    ts = 0
    for report in reports:
      for (k, v) in report.items():  # only single product
        t = v['last_ts']
        if t > ts and v['last_price_in_settle_currency'] > 0:
          ts = t
          last_price = v['last_price_in_settle_currency']
    # Second pass: one pnl entry per product per day.
    for report in reports:
      #   available fields per product:
      # pnl_net
      # last_price
      # last_ts
      # pnl_unrealized
      for (k, v) in report.items():
        # When positions are NOT cleared end-of-day, mark unrealized pnl to
        # the final price:
        #   pnl = realized + unrealized * last_price / day_price
        #       = realized + unrealized + unrealized*(last_price/day_price - 1)
        #       = original_pnl + unrealized*scale

        # eod_clear => unrealized carries no weight; also skip days with no
        # valid settle price to avoid dividing by zero.
        if eod_clear or v['last_price_in_settle_currency'] == 0:
          unrealized_scale = 0
        else:
          unrealized_scale = (last_price / v['last_price_in_settle_currency'] - 1.0)

        # trading once per 5 mins => 288 per day
        # once per 1 min => 1440
        fill_incentive = min(v['num_fills'], 288) / 288.0  # 0~1
        if v['pnl_net'] < 0:
          # Asymmetric weighting: reward activity on profitable days,
          # penalize activity on losing days.
          # + pnl? fill_incentive = 0~1.0
          # - pnl? fill_incentive = 2.0~1.0
          fill_incentive = 2 - fill_incentive

        pnls.append((v['pnl_net'] + v['pnl_unrealized'] * unrealized_scale) * fill_incentive)

  except Exception:
    print(f'{target} Trial #{trial.number}: error while reading pnl report: ', reports)
    raise

  score = donggu2(pnls)
  return score


def run(trial):
  """Optuna objective wrapper: build a config, simulate, log artifacts/metrics."""
  # For the enqueued baseline trial, sampling returns its pre-determined values.
  trial_config = params_to_config(sample_params(trial))
  score = objective(trial, trial_config)
  print(f'{target} Trial #{trial.number}: {score}')

  png = f'sim_trial{trial.number}_total.png'
  local_path = f'/scratch/local/out/{name}/{png}'
  nfs_path = f'/remote/iosg/home-2/donggu/workspace/coin/out/{name}/{png}'

  # The pnl plot file may not be available to us yet.
  if not os.path.exists(local_path):
    print(f'pnl png is not found: {local_path}')
  else:
    e.log_artifact(artifact=local_path, destination=f'pnls/Trial_{trial.number}.png')
    # Mirror the plot to NFS for manual inspection.
    os.makedirs(os.path.dirname(nfs_path), exist_ok=True)
    shutil.copyfile(local_path, nfs_path)

  e.log_artifact(artifact=trial_config, destination=f'configs/Trial_{trial.number}.json')
  e.log_metric('pnl', x=trial.number, y=score)

  return score


# Shared study storage lets several hosts contribute trials to the same study.
# SECURITY NOTE(review): database credentials are hard-coded in the URL;
# consider moving them to an environment variable or secrets store.
study = optuna.create_study(study_name=name,
                            direction='maximize',
                            storage='postgresql://postgres:EU2kb6Xos9pS@10.21.105.13:5432',
                            load_if_exists=True)
print(f'exp name = {name}')
print('==== start ====')

# Seed the search with the current production config as trial #0 baseline.
study.enqueue_trial(baseline_params(original_json['common'],
                                    original_json['products'][target]))  # config: read-only
study.optimize(run, n_trials=150, n_jobs=1, callbacks=[callback])

opt_utils.log_study(study)

print('==== result ====')
print(study.best_params)
