# Copyright (c) 2020 Presto Labs Pte. Ltd.
# Author: jhkim

import os
import collections
import json
import pandas
import numpy
from absl import app, flags

import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages


def lcp(strs):
  """Return the longest common prefix shared by every string in *strs*.

  Accepts any sequence of strings (list, pandas Index/Series, ...).
  Returns "" for an empty input.
  """
  if len(strs) == 0:
    return ""
  # zip(*strs) walks the strings column by column and stops at the
  # shortest one; collect columns while all characters agree.
  shared = []
  for column in zip(*strs):
    head = column[0]
    if all(ch == head for ch in column):
      shared.append(head)
    else:
      break
  return "".join(shared)


def main(_):
  """Aggregate per-simulation stat CSVs into combined / per-product reports.

  Reads the JSON aggregation spec (parallel lists 'stat_csv_filenames' and
  'sparams') and writes into --sim_result_dir:
    - combined_<prefix>_<postfix>.csv: 'Total' rows sorted by net pnl.
    - breakdown_<prefix>_<postfix>.csv: per-product rows sorted by net pnl.
    - chosen_<prefix>_<postfix>.txt: benchmark and best row per eval metric.
  """
  assert flags.FLAGS.aggr_spec_file is not None
  os.makedirs(flags.FLAGS.sim_result_dir, exist_ok=True)
  # Context managers close the handles promptly (the originals leaked them).
  with open(flags.FLAGS.aggr_spec_file) as spec_file:
    aggr_spec = json.load(spec_file)

  # Optionally persist a sorted copy of the filenames list next to it.
  filenames_path = os.path.join(flags.FLAGS.sim_result_dir, "..", "filenames.txt")
  if os.path.exists(filenames_path):
    with open(filenames_path) as name_file:
      filenames = sorted(name_file.read().strip().split("\n"))
    sorted_path = os.path.join(flags.FLAGS.sim_result_dir, "..",
                               "filenames_sorted.txt")
    with open(sorted_path, 'w') as sorted_file:
      sorted_file.write("\n".join(filenames))

  # Frames are grouped by a hash of their non-parameter column set so that
  # only schema-compatible frames get concatenated together.
  sparam_statdfs = collections.defaultdict(lambda: collections.defaultdict(list))
  statdfs = collections.defaultdict(list)

  for stat_csv_filename, sparam in zip(aggr_spec['stat_csv_filenames'],
                                       aggr_spec['sparams']):
    # Some simulations may not have produced output; skip missing files.
    if os.path.exists(stat_csv_filename):
      statdf = pandas.read_csv(stat_csv_filename)
      statcol = [col for col in statdf.columns if col not in sparam.keys()]
      colshash = str(sorted(statcol))
      # Record each sim parameter as an 'S_'-prefixed column; list-valued
      # parameters are flattened to a dash-joined string.
      for spkey, spval in sparam.items():
        if isinstance(spval, list):
          statdf['S_' + spkey] = "-".join(map(str, spval))
        else:
          statdf['S_' + spkey] = spval
      statdfs[colshash].append(statdf)
      sparam_statdfs[str(sparam)][colshash].append(statdf)

  globalpref = ""
  if len(statdfs) > 0:
    # Common sim_prefix shared by every loaded frame; used in output names.
    globalpref = lcp(
        pandas.concat([pandas.concat(dfs, axis=0, sort=False) for dfs in statdfs.values()],
                      axis=0,
                      sort=False)['sim_prefix'].reset_index(drop=True))

    def print_df_groupby_ccys(statdfs_ccy):
      """Yield the 'Total' rows of each schema group, indexed by sim_prefix."""
      for key, dfs in statdfs_ccy.items():
        totdf = pandas.concat(dfs, axis=0, sort=False).set_index('sim_prefix')
        totdf = totdf.loc[totdf['product'] == 'Total']
        yield totdf

    def print_result_order_by_pnl_net(totdf):
      """Return *totdf* sorted descending by its first 'pnl_net*' column."""
      pnl_net_col = [col for col in totdf.columns if col.startswith('pnl_net')]
      sorted_df = totdf.sort_values(by=pnl_net_col[0], ascending=False)
      return sorted_df

    for totdf in print_df_groupby_ccys(statdfs):
      output_path = f'{flags.FLAGS.sim_result_dir}/combined_{globalpref[:100]}_{flags.FLAGS.sim_result_postfix}.csv'
      df = print_result_order_by_pnl_net(totdf)
      df.reset_index().to_csv(output_path, index=False)

    # Per product result.
    def print_df_groupby_ccys_products(statdfs_ccy):
      """Yield (product, frame) pairs for groups with more than one product."""
      for key, dfs in statdfs_ccy.items():
        totdf = pandas.concat(dfs, axis=0, sort=False)
        if numpy.unique(totdf['product']).shape[0] > 1:
          totdf = totdf[totdf['product'] != 'Total']
          for product, pdf in totdf.groupby('product'):
            totdf = pdf.set_index('sim_prefix')
            yield product, totdf

    def print_df_groupby_ccys_tot(statdfs_ccy):
      """Yield the 'Total' rows for groups with more than one product."""
      for key, dfs in statdfs_ccy.items():
        totdf = pandas.concat(dfs, axis=0, sort=False)
        if numpy.unique(totdf['product']).shape[0] > 1:
          totdf = totdf[totdf['product'] == 'Total']
          yield totdf

    plt.rcParams['lines.linewidth'] = 0.5
    plt.rcParams['figure.figsize'] = 8, 4
    plt.rcParams['font.size'] = 6
    plt.rcParams['legend.fontsize'] = 6
    plt.rcParams['xtick.labelsize'] = 6
    plt.rcParams['ytick.labelsize'] = 6
    pandas.set_option("display.precision", 3)

    bdf = None
    # 'with' guarantees the chosen-results file is flushed and closed.
    with open(
        f'{flags.FLAGS.sim_result_dir}/chosen_{globalpref[:100]}' \
        f'_{flags.FLAGS.sim_result_postfix}.txt', 'w') as outfile:
      for product, totdf in print_df_groupby_ccys_products(statdfs):
        df = print_result_order_by_pnl_net(totdf)
        df = df.reset_index()
        df['product'] = product
        bdf = df if bdf is None else pandas.concat([bdf, df], axis=0, sort=False)
        if len(totdf) > 0:
          common_prefix = lcp(totdf.index)

          df = df.reset_index(drop=True)
          # Numeric sweep-parameter columns that actually vary across rows.
          paramcols = [
              col for col in df.columns if col.startswith("S_")
              and df[col].dtype.kind in 'fi' and len(df[col].unique()) > 1]
          netcols = [col for col in df.columns if "pnl_net" in col]
          target_sharpe_col = ('sharpe_1440min' if flags.FLAGS.is_focus_mt
                               else 'sharpe_30min')
          evalcols = ['net_ret(bps)', target_sharpe_col, 'fill_count'] + netcols
          # Rows where every varying parameter is NaN are the benchmark run.
          chosen = df[paramcols].isna().all(axis=1)

          def print_row(row):
            """Print a row's sim parameters and evaluation metrics to outfile."""
            params = {
                key.replace("S_", ""): value
                for key, value in row.to_dict().items()
                if key.startswith("S_") and value is not None and
                (isinstance(value, str) or not numpy.isnan(value))
            }
            print(params, file=outfile)
            prtdf = pandas.DataFrame([row[['product'] + evalcols]])
            print(prtdf.to_string(), file=outfile)

          if chosen.sum() == 1:
            print("-" * 50, file=outfile)
            print(common_prefix, file=outfile)
            print("BM", file=outfile)
            chosen_row = df.loc[chosen].iloc[0]
            print_row(chosen_row)
          for evalcol in evalcols:
            # Skip metrics that are entirely NaN (idxmax would yield NaN).
            if not numpy.isnan(df[[evalcol]].idxmax()).any():
              print(f"BEST {evalcol}", file=outfile)
              chosen_row = df.loc[df[[evalcol]].idxmax()].iloc[0]
              print_row(chosen_row)

    if bdf is not None:
      output_path = f'{flags.FLAGS.sim_result_dir}/breakdown_{globalpref[:100]}' \
          f'_{flags.FLAGS.sim_result_postfix}.csv'
      # Move the 'product' column in front of 'sim_prefix' before writing.
      cols = bdf.columns
      cols = [cols[1], cols[0]] + cols[2:].tolist()
      bdf[cols].to_csv(output_path, index=False)


if __name__ == '__main__':
  # Command-line interface. Help strings were empty before, which made
  # --help useless; describe what each flag controls.
  flags.DEFINE_string(
      'aggr_spec_file', None,
      'Path to the JSON aggregation spec containing the parallel lists '
      '"stat_csv_filenames" and "sparams".')
  flags.DEFINE_string(
      'sim_result_dir', None,
      'Directory where combined/breakdown/chosen outputs are written.')
  flags.DEFINE_string(
      'sim_result_postfix', None,
      'Postfix appended to every output filename.')
  flags.DEFINE_bool(
      'is_focus_mt', False,
      'If true, evaluate by sharpe_1440min instead of sharpe_30min.')
  app.run(main)
