# Copyright (c) 2022 Presto Labs Pte. Ltd.
# Author: jingyuan

import glob
import itertools
import json
import os
import pathlib
import re
import subprocess
import tempfile
from collections import defaultdict

import fire
import lightgbm
import numpy as np
import pandas as pd
import seaborn as sns
import tqdm
from matplotlib import pyplot as plt

from python.coin_deploy.basis_strat2.strat_list import PROD_STRATS
import coin.experimental.yuxuan.model.feature_config.parser as parser


# Importance type passed to lightgbm Booster.feature_importance ('gain' here;
# LightGBM also supports 'split').
IMP = 'gain'


def extrac_model(model_path):
  """Extract a LightGBM model from a zip archive.

  The archive is expected to contain a LightGBM text dump named
  ``<zip stem>.txt``.

  Args:
    model_path: Path to the ``.zip`` archive holding the model dump.

  Returns:
    A ``lightgbm.Booster`` loaded from the extracted text file, or ``None``
    when the expected file is not present in the archive.
  """
  with tempfile.TemporaryDirectory() as tmpdir:
    model_name = pathlib.Path(model_path).stem
    # Argument list with shell=False: paths containing spaces or shell
    # metacharacters can neither break the command nor inject into a shell
    # (the original used os.system with an interpolated string).
    # check=False mirrors os.system's ignore-failure behavior; a failed unzip
    # simply leads to the None return below.
    subprocess.run(['unzip', '-o', str(model_path), '-d', tmpdir], check=False)
    py_model = os.path.join(tmpdir, f'{model_name}.txt')
    if os.path.exists(py_model):
      return lightgbm.Booster(model_file=py_model)
    return None


def ls_models(regex=None):
  """Load every prod-strat model whose name matches ``regex``.

  Args:
    regex: Optional pattern matched (via ``re.match``) against each strat
      name; ``None`` selects every strat.

  Returns:
    A list of ``(lightgbm.Booster, strat_name)`` pairs for strats whose
    config declares a ``model_config_name`` and whose zip extracts cleanly.
  """
  models = []
  for strat in PROD_STRATS:
    strat_name = strat['name']
    if regex is not None and not re.match(regex, strat_name):
      continue
    with open(f'python/coin_deploy/basis_strat2/config/{strat_name}.json', 'r') as f:
      config = json.load(f)
    if 'model_config_name' not in config:
      continue
    model_config = config['model_config_name']
    model = extrac_model(f'python/coin_deploy/basis_strat2/model/zips/{model_config}.zip')
    if model:
      models.append((model, strat_name))
  return models


def get_timehorizon(param):
  """Return the first numeric underscore-separated token of ``param``.

  Tokens strictly greater than 1000 are treated as milliseconds and
  converted to (float) seconds; smaller ones are returned as-is. When no
  purely numeric token exists, the original string is returned unchanged.
  """
  for token in param.split('_'):
    if not token.isdigit():
      continue
    horizon = int(token)
    return horizon / 1000 if horizon > 1000 else horizon
  return param


def n_gram(word, limit):
  """Return all order-preserving subsequences of ``word`` of length ``limit``.

  Despite the name these are not contiguous n-grams: every selection of
  ``limit`` elements keeping the original order is produced, e.g.
  ``n_gram('abc', 2) == {('a','b'), ('a','c'), ('b','c')}``.

  Args:
    word: Any sequence (e.g. a list of feature-name tokens).
    limit: Subsequence length; results are empty when it exceeds
      ``len(word)``.

  Returns:
    A set of ``limit``-length tuples.
  """
  # itertools.combinations yields exactly the order-preserving subsequences
  # the original hand-rolled recursion produced, and it terminates for
  # degenerate limits (the recursion diverged for limit < 1).
  return set(itertools.combinations(word, limit))


def norm_fi(regex=None, path=None):
  """Build a per-feature normalized importance table across many models.

  Models come from the prod strat list (``regex``) and/or from LightGBM
  text dumps on disk (``path``: comma-separated glob patterns). For every
  model, each feature's importance is expressed relative to that model's
  median importance, then averaged over the models containing the feature.

  Returns:
    DataFrame indexed by processed feature name with columns ``fis``
    (mean normalized importance) and ``count`` (models the feature appeared in).
  """
  models = []
  if regex is not None:
    models.extend(ls_models(regex))
  if path is not None:
    for pattern in path.split(','):
      for candidate in glob.glob(pattern):
        # Only LightGBM text dumps; skip saved command files.
        if not candidate.endswith('.txt') or 'command' in candidate:
          continue
        try:
          models.append((lightgbm.Booster(model_file=candidate), candidate))
        except lightgbm.basic.LightGBMError:
          pass
  scores = defaultdict(float)
  seen = defaultdict(int)
  for model, _ in models:
    importances = model.feature_importance(IMP)
    # Floor the baseline at 1 so the division below never hits zero.
    baseline = max(np.median(importances), 1)
    for raw_name, imp in zip(model.feature_name(), importances):
      feature = process_feature_name(raw_name)
      scores[feature] += (imp - baseline) / baseline
      seen[feature] += 1
  for feature in scores:
    scores[feature] /= seen[feature]
  names = list(scores)
  df = pd.DataFrame(
      {
          'fis': [scores[n] for n in names],
          'count': [seen[n] for n in names]
      },
      index=names)
  # uncomment to ignore less common features
  # df = df[df['count'] > df['count'].quantile(0.05)]
  print("total models", len(models))
  return df


def process_feature_name(name):
  """Canonicalize a feature name so equivalent variants aggregate together.

  For each counting prefix, a numeric suffix is normalized to 30
  (e.g. ``cnorm_15`` -> ``cnorm_30``) and an alphabetic suffix is normalized
  to ``be`` (e.g. ``cnorm_netu`` -> ``cnorm_be``).
  """
  for prefix in ['cnorm', 'cmean', 'cstd', 'cnors']:
    # Raw f-strings: '\d' in a plain literal is an invalid escape sequence
    # (SyntaxWarning on Python 3.12+).
    # Normalize every symbol-count suffix to 30.
    name = re.sub(rf'{prefix}_\d+', f'{prefix}_30', name)
    # Normalize alphabetic suffixes (e.g. 'netu') to 'be'.
    name = re.sub(rf'{prefix}_[a-z]+', f'{prefix}_be', name)
  # Internal sanity check: no un-normalized cnorm_20 may survive.
  assert 'cnorm_20' not in name, name
  return name


def n_gram_fi(regex=None, path=None, limit=5, csv_path='tmp/'):
  """Dump normalized feature importances plus n-gram aggregations to CSV.

  Each feature name is split on '_' and every order-preserving token
  subsequence of length 1..limit accumulates the feature's normalized
  score, highlighting which token combinations carry importance.

  Args:
    regex: Forwarded to norm_fi to pick prod models.
    path: Forwarded to norm_fi to pick on-disk model dumps.
    limit: Maximum subsequence ("n-gram") length.
    csv_path: Output prefix; a sanitized regex/path is appended and the
      result doubles as the '<csv_path>_fi.csv' stem and as the directory
      for the per-length gram CSVs.
  """
  fis = norm_fi(regex, path)
  fi_grams = defaultdict(lambda: defaultdict(float))
  for name, row in tqdm.tqdm(fis.iterrows(), leave=False):
    tokens = name.split("_")
    for i in range(1, limit + 1):
      for gram in n_gram(tokens, i):
        fi_grams[i][gram] += row['fis']
  if regex is not None:
    csv_path += "".join(w for w in regex if w.isalnum())
  else:
    csv_path += "".join(w for w in path if w.isalnum())
  # Create the output directory up-front; it also creates the parent of the
  # '<csv_path>_fi.csv' file written below (previously to_csv crashed when
  # e.g. 'tmp/' did not exist yet because makedirs ran afterwards).
  os.makedirs(csv_path, exist_ok=True)
  fis = fis.sort_values('fis')
  print(f'full score saved to {csv_path}_fi.csv, top unused features')
  fis.to_csv(csv_path + '_fi.csv')
  print(fis.head(20))
  for i in range(1, limit + 1):
    pd.Series(fi_grams[i]).sort_values().to_csv(f'{csv_path}/{i}.csv')
  return None


def features_box(regex=None, path=None, features=''):
  """Box-plot normalized importance distributions for feature groups.

  Args:
    regex: Forwarded to norm_fi to pick prod models.
    path: Forwarded to norm_fi to pick on-disk model dumps.
    features: Comma-separated regexes (or a list of them); each pattern
      that matches at least one feature name becomes a box next to the
      '.*' baseline of all features.

  Side effects:
    Saves the figure to tmp/<sanitized-feature-list>.png (assumes 'tmp/'
    exists) and prints the path.
  """
  if isinstance(features, str):
    features = features.split(',')
  useful = norm_fi(regex, path)
  all_features = {'.*': useful['fis']}
  for f in features:
    # Compute the boolean mask once per pattern (the original evaluated
    # index.str.contains twice: once inside np.any, once again afterwards).
    the_idx = useful.index.str.contains(f)
    if the_idx.any():
      all_features[f'{f} cnt:{the_idx.sum()}'] = useful['fis'].iloc[the_idx]
  plt.figure(figsize=(20, 20))
  sns.boxplot(
      data=pd.DataFrame(all_features),
      showfliers=False,
      showmeans=True,
  )
  plt.axhline(0, color='red')
  plt.suptitle(f'feature importance {IMP}')
  sns.stripplot(data=pd.DataFrame(all_features))
  png_name = "".join(w for w in "".join(features) if w.isalnum())[:50]
  plt.savefig(f'tmp/{png_name}.png')
  print('saved to tmp/' + png_name + '.png')
  return


def sum_fi(regex=None):
  """Sum raw feature importances over all prod models matching ``regex``.

  Returns:
    A ``(fi, fi_split)`` pair: ``fi`` maps full feature name to summed
    importance; ``fi_split`` maps component feature to time horizon to
    summed importance, with components taken from ``parser.tokenize``.
  """
  models = ls_models(regex)
  fi = defaultdict(int)
  fi_split = defaultdict(lambda: defaultdict(int))
  # ls_models returns (model, strat_name) pairs; the original iterated the
  # tuples directly, so model.feature_name() raised AttributeError.
  for model, _name in models:
    for name, count in zip(model.feature_name(), model.feature_importance(IMP)):
      for component in parser.tokenize(name)[0]:
        feature, param = component.split('_', 1)
        fi_split[feature][get_timehorizon(param)] += count
      fi[name] += count
  return fi, fi_split


# check what features are useful/useless for PROD model
# ./pyrunner python/experimental/jingyuan/exam_model.py n_gram_fi  --regex=model_smm_binance_perp --limit=5 --csv_path=tmp/

# compare the level of usefulness in plot for PROD model
# ./pyrunner python/experimental/jingyuan/exam_model.py features_box  --regex=model_smm_binance_perp --features="nvol_.*_5000_,nvolp_.*_5000,nvolp2_.*_5000"

# z score of feature importance for model in a path
# ./pyrunner python/experimental/jingyuan/exam_model.py n_gram_fi  --path="/home/yuxuan/workspace/coin/test*/*" --limit=2 --csv_path=tmp/

# compare shape of sarlen,minus_2_vwap,vwap feature importance; saves the plot to tmp/<sanitized-feature-list>.png
# ./pyrunner python/experimental/jingyuan/exam_model.py features_box  --path="/home/yuxuan/workspace/coin/test*/*" --features='sarlen,minus_2_vwap,vwap'

if __name__ == '__main__':
  # Expose every top-level function as a CLI subcommand; see the usage
  # examples in the comments above.
  fire.Fire()
