# Copyright (c) 2018 Presto Labs Pte. Ltd.
# Author: jaewon

import concurrent.futures
import datetime
import json
import logging
from collections import OrderedDict
from io import StringIO

import pandas as pd
from absl import app, flags
from sqlitedict import SqliteDict

from coin.base.param_util import to_list
from coin.strategy.mm.simple_sim import result_util
from coin.strategy.bitmex_param_picker.algo import *


def load_sim_result_to_dataframe(result_dirs, name_filter=None):
  """Load sim results into a DataFrame and derive parameter columns.

  Example name: 02m.04bp.-1bp.06stack.without_agg.20180701 — the last
  8 characters are the date; the parameter fields live at fixed offsets.
  """
  raw = result_util.load_sim_result(result_dirs, name_filter=name_filter)
  df = pd.DataFrame(raw)

  names = list(df['name'])
  df['param_name'] = [n[:-9] for n in names]  # strip the '.YYYYMMDD' suffix
  df['date'] = [datetime.datetime.strptime(n[-8:], '%Y%m%d').date() for n in names]

  # Fixed-offset fields of the dotted name, e.g. '02m', '04bp', '-1bp', '06stack'.
  df['ma_window'] = [int(n[0:2]) for n in names]
  df['edge'] = [int(n[4:6]) for n in names]
  df['close_edge'] = [int(n[9:11]) for n in names]
  df['stack'] = [int(n[14:16]) for n in names]
  return df


# NOTE(review): the triple-quoted block below is dead code disabled by turning
# it into a bare string literal (count_bucket / count_param helpers). It has no
# runtime effect; consider deleting it — version control preserves history.
'''
def count_bucket(names, to_str=False):
  count = {}
  keys = ['low', 'mid', 'high']  #  'very_high']
  for k1 in keys:
    for k2 in keys:
      count[(k1, k2)] = 0

  for name in names:
    bkey = get_bucket_key(name)
    count[bkey] = count.get(bkey, 0) + 1

  if to_str:
    s = []
    for k1 in keys:
      for k2 in keys:
        s.append(str(count[(k1, k2)]))
      s.append(' ')
    return ''.join(s)
  else:
    return count


def count_param(names, from_idx, to_idx, to_str=False):
  count = {}
  for name in names:
    param = int(name[from_idx:to_idx])
    count[param] = count.get(param, 0) + 1

  if to_str:
    return ', '.join([('%d:%d' % (k, count[k])) for k in sorted(count.keys())])
  else:
    return count
'''


def get_pnl(sim_df, date, param_names, check_num_params=None, tag=None):
  """Return a DataFrame of (param_name, pnl) rows for `param_names` on `date`.

  If `check_num_params` is given and the count differs, an error is logged
  (prefixed with `tag` when provided) but execution continues.
  Returns an empty two-column DataFrame when nothing matches.
  """
  if check_num_params is not None and len(param_names) != check_num_params:
    prefix = ('%s ' % tag) if tag else ''
    logging.error('[%s] %-20s %d != %d', date, prefix, len(param_names), check_num_params)

  date_str = date.strftime('%Y%m%d')
  full_names = ['%s.%s' % (p, date_str) for p in param_names]
  matched = sim_df.loc[sim_df.name.isin(full_names)].reset_index()

  if matched.empty:
    return pd.DataFrame({'param_name': [], 'pnl': []})
  return pd.DataFrame({'param_name': matched.param_name, 'pnl': matched.pnl})


def do_pick(date, sim_df):
  """Evaluate every picking strategy for one date.

  Returns a tuple (pnls, picked):
    pnls   -- OrderedDict mapping strategy name -> summed pnl for `date`.
    picked -- dict mapping strategy name -> list of picked param names
              (plus the 'date' key).

  The pick_* helpers come from coin.strategy.bitmex_param_picker.algo
  (star import at the top of the file).
  """
  # NOTE(review): prev_date is never used in this function — TODO confirm
  # whether it can be removed.
  prev_date = date - datetime.timedelta(days=1)
  num_pick = 14
  num_per_bucket = 3
  # Fixed reference portfolio for the 'static' baseline strategy.
  static_set = [
      '10m.05bp.02bp.06stack.without_agg',
      '10m.07bp.02bp.06stack.without_agg',
      '15m.07bp.07bp.06stack.without_agg',
      '15m.09bp.-1bp.06stack.without_agg',
      '20m.07bp.07bp.06stack.without_agg',
      '20m.09bp.09bp.06stack.without_agg',
      '30m.05bp.05bp.06stack.without_agg',
      '30m.07bp.07bp.06stack.without_agg',
      '30m.09bp.09bp.06stack.without_agg'
  ]

  # OrderedDict so the per-strategy pnl columns keep insertion order when
  # turned into a DataFrame by the caller.
  pnls = OrderedDict(date=date)
  picked = {'date': date}

  # Universe
  universe = pick_threshold(sim_df,
                            date,
                            pnl_mean_threshold=250,
                            qty_median_threshold=65,
                            pnl_window=40,
                            qty_window=10)
  refined_df = sim_df.loc[sim_df.param_name.isin(universe)]

  # Narrow the universe further: best params per bucket over a 60-day window.
  names = pick_bucket_best_n_from_historical_pnl(refined_df,
                                                 date,
                                                 100,
                                                 1,
                                                 get_bucket_key_1,
                                                 window=60,
                                                 method='best')
  refined_df = refined_df.loc[refined_df.param_name.isin(names)]

  # 'fl' (forward-looking) baseline.
  # NOTE(review): this picks using date + 1 day with window=1, i.e. it looks at
  # the evaluation day itself — presumably an intentional oracle/upper-bound
  # reference, not a tradable strategy. TODO confirm.
  names = pick_by_historical_pnl(refined_df,
                                 date + datetime.timedelta(days=1),
                                 num_pick,
                                 window=1,
                                 method='best')
  picked['fl'] = names
  pnls['fl'] = get_pnl(refined_df, date, names, num_pick, 'fl').pnl.sum()

  names = pick_bucket_best_n(refined_df, date, num_pick, num_per_bucket, get_bucket_key_2)
  picked['bucket_fl'] = names
  pnls['bucket_fl'] = (get_pnl(refined_df, date, names, num_pick, 'bucket_fl').pnl.sum())
  fl_names = names

  # Static baseline; pnl is scaled so that portfolios of different sizes are
  # comparable on a per-num_pick basis.
  names = pick_static(sim_df, date, static_set)
  pnls['static'] = (get_pnl(sim_df, date, names).pnl.sum() * (num_pick / len(names)))

  # Bucketed picks over the previous day (window=1): best / mid / worst.
  names = pick_bucket_best_n_from_historical_pnl(refined_df,
                                                 date,
                                                 num_pick,
                                                 num_per_bucket,
                                                 get_bucket_key_2,
                                                 window=1,
                                                 method='best')
  picked['bucket_best'] = names
  pnls['bucket_best'] = (get_pnl(refined_df, date, names, num_pick, 'bucket_best').pnl.sum())

  names = pick_bucket_best_n_from_historical_pnl(refined_df,
                                                 date,
                                                 num_pick,
                                                 num_per_bucket,
                                                 get_bucket_key_2,
                                                 window=1,
                                                 method='mid')
  picked['bucket_mid'] = names
  pnls['bucket_mid'] = (get_pnl(refined_df, date, names, num_pick, 'bucket_mid').pnl.sum())

  names = pick_bucket_best_n_from_historical_pnl(refined_df,
                                                 date,
                                                 num_pick,
                                                 num_per_bucket,
                                                 get_bucket_key_2,
                                                 window=1,
                                                 method='worst')
  picked['bucket_worst'] = names
  pnls['bucket_worst'] = (get_pnl(refined_df, date, names, num_pick, 'bucket_worst').pnl.sum())

  # Un-bucketed variants.
  names = pick_by_historical_pnl(refined_df, date, num_pick, window=1, method='best')
  picked['best'] = names
  pnls['best'] = get_pnl(refined_df, date, names, num_pick, 'best').pnl.sum()

  names = pick_by_historical_pnl(refined_df, date, num_pick, window=1, method='mid')
  picked['mid'] = names
  pnls['mid'] = get_pnl(refined_df, date, names, num_pick, 'mid').pnl.sum()

  # NOTE(review): 'worst' uses window=8 while best/mid use window=1 — looks
  # deliberate (mean-reversion horizon), but worth confirming.
  names = pick_by_historical_pnl(refined_df, date, num_pick, window=8, method='worst')
  picked['worst'] = names
  pnls['worst'] = get_pnl(refined_df, date, names, num_pick, 'worst').pnl.sum()

  names = pick_bucket_worst_by_reversion(refined_df,
                                         date,
                                         num_pick,
                                         num_per_bucket,
                                         get_bucket_key_2)
  picked['bucket_reversion'] = names
  pnls['bucket_reversion'] = get_pnl(refined_df, date, names).pnl.sum()

  names = pick_hybrid(refined_df, date, num_pick)
  picked['hybrid'] = names
  pnls['hybrid'] = (get_pnl(refined_df, date, names, num_pick, 'hybrid').pnl.sum())

  # Half the picks from 8-day bucketed worst, the rest chosen to minimize
  # correlation with them.
  names = pick_bucket_best_n_from_historical_pnl(refined_df,
                                                 date,
                                                 num_pick // 2,
                                                 num_per_bucket,
                                                 get_bucket_key_2,
                                                 window=8,
                                                 method='worst')
  names += pick_by_min_corr(refined_df, date, names)
  picked['bucket_worst_min_corr'] = names
  pnls['bucket_worst_min_corr'] = (get_pnl(refined_df,
                                           date,
                                           names,
                                           num_pick,
                                           'bucket_worst_min_corr').pnl.sum())

  # Hybrid 2
  names = pick_hybrid_2(sim_df, date, num_pick)
  picked['hybrid_2'] = names
  pnls['hybrid_2'] = get_pnl(sim_df, date, names, num_pick, 'hybrid_2').pnl.sum()

  # Hybrid 3
  names = pick_hybrid_3(sim_df, date, num_pick)
  picked['hybrid_3'] = names
  pnls['hybrid_3'] = get_pnl(sim_df, date, names, num_pick, 'hybrid_3').pnl.sum()

  # print('fl  ', date, ', '.join([s[:16] for s in picked['fl']]))
  # print('best', date, ', '.join([s[:16] for s in picked['best']]))
  # print('h3  ', date, ', '.join([s[:16] for s in picked['hybrid_3']]))

  return pnls, picked


def query_sim_result_dir(sim_job_id_db, from_date, to_date, sim_result_dir_prefix=None):
  """Return sorted, de-duplicated sim result directories for [from_date, to_date].

  Job ids are read from the 'sim_job_id' table of the SqliteDict database at
  `sim_job_id_db`, keyed by 'YYYYMMDD' date strings.
  """
  sim_result_dir_prefix = (sim_result_dir_prefix or '/remote/iosg/home/jaewon/scratch/sim-')

  collected = []
  with SqliteDict(sim_job_id_db,
                  tablename='sim_job_id',
                  flag='r',
                  encode=json.dumps,
                  decode=json.loads) as table:
    one_day = datetime.timedelta(days=1)
    day = from_date
    while day <= to_date:
      key = day.strftime('%Y%m%d')
      if key in table:
        collected.extend(table[key])
      day += one_day

  unique_ids = sorted({str(job_id) for job_id in collected})
  return ['%s%s' % (sim_result_dir_prefix, jid) for jid in unique_ids]


def load_sim_result(*,
                    from_date=None,
                    to_date=None,
                    from_pickle=None,
                    result_dir=None,
                    from_sim_job_id_db=None,
                    to_pickle=None):
  """Load sim results into a DataFrame from exactly one of three sources.

  Sources (mutually exclusive):
    from_sim_job_id_db -- sqlite DB mapping dates to sim job ids; requires
                          both from_date and to_date.
    from_pickle        -- path to a pandas DataFrame pickle.
    result_dir         -- one or more sim result directories.

  The result is then filtered to [from_date, to_date] (when given), filtered
  to stack <= 24, and optionally saved to `to_pickle`.
  """
  if from_sim_job_id_db is not None:
    assert from_pickle is None
    assert result_dir is None
    assert from_date is not None and to_date is not None
    result_dirs = query_sim_result_dir(from_sim_job_id_db, from_date, to_date)
    logging.info('Loading from %s', ', '.join(result_dirs))
    sim_df = load_sim_result_to_dataframe(result_dirs)

  elif from_pickle is not None:
    assert result_dir is None
    # Bug fix: this previously read FLAGS.from_pickle, but FLAGS is not in
    # scope in this function, so any non-flag caller hit a NameError. Use the
    # keyword argument instead.
    sim_df = pd.read_pickle(from_pickle)

  else:
    assert result_dir is not None
    result_dirs = to_list(result_dir)
    sim_df = load_sim_result_to_dataframe(result_dirs)

  if from_date is not None:
    sim_df = sim_df.loc[sim_df.date >= from_date]

  if to_date is not None:
    sim_df = sim_df.loc[sim_df.date <= to_date]

  # TODO(jaewon): Filter
  sim_df = sim_df.loc[sim_df['stack'] <= 24]

  if to_pickle is not None:
    assert from_pickle != to_pickle
    sim_df.to_pickle(to_pickle)

  return sim_df


def load_sim_result_from_flags():
  """Load sim results using the command-line flags defined in this module."""
  FLAGS = flags.FLAGS

  def _parse_date(date_str):
    # Flags arrive as 'YYYYMMDD' strings or None.
    if date_str is None:
      return None
    return datetime.datetime.strptime(date_str, '%Y%m%d').date()

  from_date = _parse_date(FLAGS.from_date)
  to_date = _parse_date(FLAGS.to_date)

  # --result_dir is a comma-separated list of directories.
  result_dir = FLAGS.result_dir
  if result_dir is not None:
    result_dir = result_dir.split(',')

  return load_sim_result(from_date=from_date,
                         to_date=to_date,
                         from_pickle=FLAGS.from_pickle,
                         result_dir=result_dir,
                         from_sim_job_id_db=FLAGS.from_sim_job_id_db,
                         to_pickle=FLAGS.to_pickle)


def pick_param(date, sim_df, num_pick=14):
  """Pick `num_pick` parameter names for `date` using the bucket-mid strategy.

  `num_pick` was previously hard-coded; it is now a backward-compatible
  keyword argument. The unused `prev_date` local was removed.
  """
  # Alternative strategy kept for reference:
  # return pick_hybrid_3(sim_df, date, num_pick)
  return pick_bucket_mid(sim_df, date, num_pick)


def sanitize_param_name(param_name, one_side_stack):
  """Normalize a param name: adjust stack count, drop the 'without_agg' suffix.

  Input looks like '02m.04bp.-1bp.06stack.without_agg'. The stack count is
  doubled unless `one_side_stack` is true, then zero-padded to two digits.
  """
  tokens = param_name.split('.')
  assert tokens[3].endswith('stack')
  assert tokens[-1] == 'without_agg'

  count = int(tokens[3][:-5])
  if not one_side_stack:
    # A one-sided stack value covers both sides of the book.
    count = count * 2

  tokens[3] = '%02dstack' % count
  return '.'.join(tokens[:-1])


def pick_daily_param(sim_df, pick_date, one_side_stack=False, to_portfolio_db=None):
  """Pick the daily parameter set, sanitize it, and optionally persist it.

  When `to_portfolio_db` is given, the sorted names are written under the
  'YYYYMMDD' key in the 'portfolio' table of that SqliteDict database.
  Returns the sorted, sanitized parameter names.
  """
  raw_names = pick_param(pick_date, sim_df)
  param_names = sorted(sanitize_param_name(name, one_side_stack) for name in raw_names)

  if to_portfolio_db:
    key = pick_date.strftime('%Y%m%d')
    with SqliteDict(to_portfolio_db,
                    tablename='portfolio',
                    autocommit=True,
                    encode=json.dumps,
                    decode=json.loads) as table:
      table[key] = sorted(param_names)

  return param_names


def run_reference_mode(sim_df, dump_from_date=None, pnl_ma_window=None, print_to_stdout=False):
  """Run every picking strategy over all dates and report per-strategy pnl.

  For each date from `dump_from_date` onward, do_pick() is evaluated in a
  process pool; the per-strategy pnls are assembled into a DataFrame and
  written (with summary stats and the latest picked portfolio) to a string,
  which is returned (and printed when `print_to_stdout` is set).
  """
  dates = sorted(set(sim_df.date))
  # NOTE(review): dates[1] raises IndexError when sim_df covers a single date
  # — presumably at least two dates are always loaded; confirm.
  dump_from_date = dump_from_date or dates[1]

  out = StringIO()
  out.write('Date range:  %s - %s\n' % (dates[0], dates[-1]))

  # do_pick is CPU-bound, so fan out across processes; exiting the context
  # manager waits for all submitted work.
  futures = []
  with concurrent.futures.ProcessPoolExecutor() as executor:
    for date in dates:
      if date < dump_from_date:
        continue
      fut = executor.submit(do_pick, date, sim_df)
      futures.append(fut)

  # futures preserves submission (date) order; element [0] of each result is
  # the per-strategy pnl dict, [1] the picked names.
  table = [fut.result()[0] for fut in futures]
  result = pd.DataFrame(table).set_index('date')
  out.write(str(result))
  out.write('\n---\n')

  if pnl_ma_window is not None:
    out.write('Moving average PnL: %d days\n' % pnl_ma_window)
    out.write(str(result.rolling(pnl_ma_window).mean()))
    out.write('\n---\n')

  out.write(str(result.describe()))
  out.write('\n---\n')

  # Show the portfolio the production strategy ('bucket_mid') picked for the
  # most recent date, with its per-param sim statistics.
  latest_picked = futures[-1].result()[1]
  last_date_str = dates[-1].strftime('%Y%m%d')
  cols = ['name', 'num_trade', 'pnl', 'pq_sum', 'return_bp']

  method = 'bucket_mid'  # 'hybrid_3'
  rows = [('%s.%s' % (name, last_date_str)) for name in latest_picked[method]]
  out.write('[%s] pick: %s\n' % (method, last_date_str))

  picked_stat = sim_df.loc[sim_df.name.isin(rows), cols].set_index('name')
  out.write(str(picked_stat))
  out.write('\n---\n')

  out_str = out.getvalue()
  out.close()
  if print_to_stdout:
    print(out_str)

  return out_str


def main(argv):
  """Entry point: pick daily params when --pick_date is set, else run reference mode."""
  FLAGS = flags.FLAGS

  # Uncapped, wide pandas display so dumped tables are readable.
  for option, value in (('display.max_rows', None),
                        ('display.max_columns', None),
                        ('display.width', 250),
                        ('display.float_format', '{:,.2f}'.format)):
    pd.set_option(option, value)

  sim_df = load_sim_result_from_flags()

  if FLAGS.pick_date is not None:
    pick_date = datetime.datetime.strptime(FLAGS.pick_date, '%Y%m%d').date()
    param_names = pick_daily_param(sim_df,
                                   pick_date,
                                   one_side_stack=FLAGS.one_side_stack,
                                   to_portfolio_db=FLAGS.to_portfolio_db)
    for name in param_names:
      print(name)
    return

  # Reference mode
  dump_from_date = None
  if FLAGS.dump_from_date is not None:
    dump_from_date = datetime.datetime.strptime(FLAGS.dump_from_date, '%Y%m%d').date()

  run_reference_mode(sim_df,
                     dump_from_date=dump_from_date,
                     pnl_ma_window=FLAGS.show_pnl_ma,
                     print_to_stdout=True)


if __name__ == '__main__':
  # Command-line interface. Exactly one data source should be supplied:
  # --from_pickle, --from_sim_job_id_db (with --from_date/--to_date), or
  # --result_dir. With --pick_date the script prints (and optionally stores)
  # one day's portfolio; otherwise it runs reference mode over all dates.
  flags.DEFINE_string('from_pickle',
                      None,
                      'If specified, it will load from pandas DataFrame pickle.')
  flags.DEFINE_string(
      'to_pickle',
      None,
      'If specified, after reading data from result_dir, DataFrame will be '
      'saved to the specified file.')
  flags.DEFINE_string('from_sim_job_id_db', None, 'Sim Job ID DB')
  flags.DEFINE_string('result_dir', None, 'Result directory')
  flags.DEFINE_string('from_date', None, 'Load results from the specified date')
  flags.DEFINE_string('to_date', None, 'Load results to the specified date')
  flags.DEFINE_string('pick_date', None, 'Print parameters of the specific date')
  flags.DEFINE_boolean('one_side_stack', False, 'If true, it uses one side stack')
  flags.DEFINE_string('to_portfolio_db', None, 'Portfolio DB')
  flags.DEFINE_integer('show_pnl_ma', None, 'If specified, it shows moving average pnl.')
  flags.DEFINE_string('dump_from_date', None, 'If specified, it dumps from the specified date.')
  app.run(main)
