# Copyright (c) 2019 Presto Labs Pte. Ltd.
# Author: fengyang

import datetime
import json
import os
import toml

from absl import app, flags
FLAGS = flags.FLAGS
import pandas as pd

from xunkemgmt_client.tool.slack_noti import send_to_slack

# Root of the mirrored raw-feed buckets scanned for .gz feed files.
flow_root = "/remote/iosg/coin-mirror-1/buckets/feed.raw.coin"
# Repo-relative directory holding the per-machine feed-writer json configs.
config_root = "data/coin2/feed/ops/feed-writer"
# Workers per (exchange, group); expected prefixes are suffixed 1..num_workders.
# NOTE(review): name looks like a typo for "num_workers" — kept for compatibility.
num_workders = 2
min_file_size = 50000  # 50k; 30-minute buckets with a smaller total are flagged

# Default machines to audit; overridable via the --machines flag.
machines = [
    "feed-01.ap-northeast-1.aws.huobi",
    "feed-02.ap-northeast-2.aws",
    "feed-02.eu-west-1.aws",
    "feed-02.us-east-1.aws",
    "feed-05.ap-northeast-1.aws",
    "feed-05.cn-hongkong.aliyun",
    "feed-01.ap-southeast-1.aws",
]


def filename_to_table(machine, date, root_dir):
  """Collect per-file metadata for one machine/date directory.

  Scans <root_dir>/<machine>/<date> and each of its immediate
  subdirectories for feed files and returns one row per file:
  [full_path, file_size, machine, meagw_prefix, created_time].

  Returns an empty list when the directory does not exist.  If a filename
  does not match the expected "<prefix>.<queue>.<timestamp>.gz" layout, a
  slack alert is sent and the run aborts (sync may be incomplete).

  Bug fix: the old code extended every subdirectory's listing with the
  root-level files (while the root dir was also scanned itself), so each
  root-level file was tabulated several times — inflating totals and
  producing false 'crash' (count > 1) reports downstream.  Each directory
  is now scanned exactly once.
  """
  root_path = os.path.join(root_dir, machine, date)
  if not os.path.exists(root_path):
    print("%s not exists" % root_path)
    return []

  _, sub_dirs, _ = next(os.walk(root_path))
  # Scan the date directory itself plus each immediate subdirectory.
  scan_dirs = [os.path.join(root_path, sub_dir) for sub_dir in sub_dirs]
  scan_dirs.append(root_path)

  data_table = []
  for scan_dir in scan_dirs:
    # expected layout: spot_binance_v1_j2.queue-0030zxf6f7.20200301-113000Z.gz
    for filename in os.listdir(scan_dir):
      full_path = os.path.join(scan_dir, filename)
      if os.path.isdir(full_path):
        continue
      components = filename.split('.')
      if len(components) != 4:
        print(filename)
        send_to_slack("sync up not finished {}".format(date), 'coin_feed_noti', 'msg')
        assert False, "sync up may not finished - " + machine

      ext = components[3]
      assert ext == 'gz'
      # market exchange api group worker
      meagw = components[0]
      created_time = datetime.datetime.strptime(components[2], "%Y%m%d-%H%M%SZ")
      file_size = os.path.getsize(full_path)
      data_table.append([full_path, file_size, machine, meagw, created_time])
  return data_table


def file_info_to_dataset(date, flow_root, machines):
  """Build one DataFrame of per-file stats across all machines.

  Columns: path, size, machine, prefix, time.
  """
  rows = []
  for machine in machines:
    rows.extend(filename_to_table(machine, date, flow_root))
  columns = ['path', 'size', 'machine', 'prefix', 'time']
  return pd.DataFrame(rows, columns=columns)


def get_machine_config(machine, num_workders, config_dir):
  """Derive the expected feed-file prefixes for one machine.

  Reads <config_dir>/<machine>.json for the subscribed exchanges/groups
  and data/coin2/feed/symbol_groups.toml for group membership, returning
  every "<mea_prefix>_<group><worker>" prefix that should produce files.
  """
  config_path = os.path.join(config_dir, machine + ".json")
  with open(config_path) as f:
    exchanges = json.load(f)['feed']['exchanges']
  with open("data/coin2/feed/symbol_groups.toml") as f:
    symbol_groups = toml.load(f)

  # Exchanges whose files are written under hand-named prefixes instead of
  # the derived lowercase "<market>_<exchange>_<api>" one.
  special_prefixes = {
      'Spot.Uniswap.v3': ['uniswapv3_rest_trade', 'uniswapv3_rest_depth'],
      'Spot.Uniswap.v2': ['uniswapv2_rest_trade', 'uniswapv2_rest_depth'],
      'Spot.Uniswap.v3-arbitrum': ['uniswapv3-arbitrum_rest_depth'],
      'Spot.Pancakeswap.v2': ['pancakeswapv2_rest_trade',
                              'pancakeswapv2_rest_reserve',
                              'pancakeswapv2_rest_depth'],
  }

  prefix_list = []
  for mea, config in exchanges.items():
    groups = config["products"]["groups"]
    if "_split_channel" in mea:
      continue
    derived = mea.lower().replace('.', "_")
    for group in groups:
      # Skip groups with no symbols configured for this exchange.
      if len(symbol_groups[derived][group]) == 0:
        continue
      bases = special_prefixes.get(mea, [derived])
      for worker in range(1, num_workders + 1):
        for base in bases:
          prefix_list.append("{}_{}{}".format(base, group, worker))
  return prefix_list


def get_groupby_30min(machine, prefix, date, dataset):
  """Aggregate file sizes into 30-minute buckets for one machine/prefix.

  Returns a frame indexed by bucket start time with columns 'total'
  (summed bytes) and 'count' (number of files).  The *date* argument is
  unused but kept for interface stability.
  """
  mask = (dataset['machine'] == machine) & (dataset['prefix'] == prefix)
  selected = dataset[mask]
  grouper = pd.Grouper(key='time', freq='30Min')
  return selected.groupby(grouper)['size'].agg(total='sum', count='count')


# prefix is market_exchange_api_groupworker
def check_individual_files(date, stat, prefix):
  """Flag missing / duplicated / undersized 30-minute buckets.

  Args:
    date: day being checked, 'yyyymmdd'.
    stat: frame with 'total' (bytes) and 'count' columns indexed by bucket
      start time (output of get_groupby_30min).
    prefix: feed prefix; used only to detect the low-frequency dex feeds.

  Returns:
    DataFrame indexed by bucket time with a 'status' column holding
    'miss', 'crash' or 'too_small'.
  """
  # 48 half-hour buckets per day; dex rest feeds write roughly once a day,
  # so only the first bucket is expected for them.
  time_range = pd.date_range(date, periods=48, freq='30Min')
  if 'pancakeswap' in prefix or 'uniswap' in prefix:
    time_range = pd.date_range(date, periods=1, freq='30Min')

  # NOTE: the index stays unnamed on purpose — downstream reset_index()
  # must yield an 'index' column, which summarise_crash/summarise_miss
  # group on.  (The old `missed_index.set_names('time')` call discarded
  # its return value and was a no-op; it has been removed.)
  missed_index = time_range[~time_range.isin(stat.index)]
  missed = pd.DataFrame(index=missed_index)
  missed['status'] = 'miss'

  # More than one file in a bucket means the writer restarted (crashed).
  crashed = pd.DataFrame(index=stat[stat['count'] > 1].index)
  crashed['status'] = 'crash'

  too_small = pd.DataFrame(index=stat[stat['total'] < min_file_size].index)
  too_small['status'] = 'too_small'

  # pd.concat replaces DataFrame.append, which was removed in pandas 2.0.
  return pd.concat([missed, crashed, too_small])


def compare_machines(date, dataset):
  """Cross-check per-machine 30-minute totals against the fleet median.

  For every (timepoint, prefix) slice, machines whose 'total' deviates
  from the slice median by more than 10% are reported with status
  'too_big' or 'too_small'.  Returns an empty DataFrame when nothing is
  out of band.  Slices with no rows yield NaN statistics, and NaN
  comparisons are False, so they are skipped naturally.
  """
  time_range = pd.date_range(date, periods=48, freq='30Min')
  prefixes = dataset['prefix'].unique()
  frames = []
  for timepoint in time_range:
    for prefix in prefixes:
      slice_data = dataset[(dataset['time'] == timepoint) & (dataset['prefix'] == prefix)]
      max_size = slice_data['total'].max()
      min_size = slice_data['total'].min()
      median = slice_data['total'].median()
      if (max_size - median) / median > 0.1:
        too_big = slice_data[slice_data['total'] == max_size].copy()
        too_big['status'] = 'too_big'
        frames.append(too_big)
      if (median - min_size) / median > 0.1:
        too_small = slice_data[slice_data['total'] == min_size].copy()
        too_small['status'] = 'too_small'
        frames.append(too_small)
  # pd.concat replaces DataFrame.append, which was removed in pandas 2.0.
  return pd.concat(frames) if frames else pd.DataFrame()


def summarise_crash(dataset):
  """Summarise 'crash' rows into (machine, time) -> worker-id string.

  Groups the crashed buckets and joins the worker numbers (the trailing
  digit of each prefix) with commas.
  """
  crash_rows = dataset[dataset['status'] == 'crash']

  def check_worker(meaw_list):
    # Prefixes end in the worker number; only workers 1 and 2 exist.
    workers = set()
    for l in meaw_list:
      worker_id = l[-1]
      if worker_id not in ('1', '2'):
        assert False, "not support " + l
      workers.add(worker_id)
    return ",".join(list(workers))

  return crash_rows.groupby(['machine', 'index'])['prefix'].apply(check_worker)


def summarise_miss(dataset):
  """Summarise 'miss' rows into (machine, prefix) -> readable time ranges.

  Contiguous missing 30-minute buckets (gap < 1 hour between neighbours)
  are collapsed into "start~end" strings; isolated buckets appear alone.
  Noti feeds are filtered out.

  Bug fix: the old check_continue_time unconditionally made the second
  timepoint the range end regardless of gap, and on a >= 1h gap it folded
  the distant timepoint into the *previous* range's end before flushing —
  mis-reporting the gap itself as missing.  Runs are now closed before a
  new one starts.
  """
  filter_out_mea = (
      "futures_huobi_v1-swap-noti",
      "futures_huobi_v1-noti",
  )
  df = dataset[
      (dataset['status'] == 'miss') &
      (~ dataset['prefix'].str.startswith(filter_out_mea))]

  def check_continue_time(time_index):
    """Collapse sorted timepoints into comma-joined range strings."""
    time_range = []

    def flush(start_time, end_time):
      # Emit "start~end" for a run, or just "start" for a single bucket.
      if start_time is None:
        return
      if end_time != start_time:
        time_range.append("{}~{}".format(start_time, end_time))
      else:
        time_range.append(str(start_time))

    start_time = None
    end_time = None
    for timepoint in time_index:
      if start_time is None:
        start_time = timepoint
        end_time = timepoint
      elif (timepoint - end_time) < datetime.timedelta(hours=1):
        # Still part of the current contiguous run.
        end_time = timepoint
      else:
        # Gap too large: close the current run and start a new one.
        flush(start_time, end_time)
        start_time = timepoint
        end_time = timepoint
    flush(start_time, end_time)
    return ",".join(time_range)

  df_miss = (df.groupby(['machine',
                         'prefix'])['index'].apply(check_continue_time).reset_index(name='time'))
  return df_miss


def check_unfinished_machine(date, dataset):
  """Return machines whose newest bucket is before <date>T23:30:00.

  Such machines have not yet synced the full day's files.  Result has
  columns 'machine' and 'max_time'.
  """
  day_end = pd.Timestamp(date + "T23:30:00")
  latest = dataset.groupby('machine')['time'].max()
  return latest[latest < day_end].reset_index(name='max_time')


def main(_):
  """Audit one day's feed files on every machine and report anomalies.

  Builds the per-file dataset, flags missing / crashed / undersized
  30-minute buckets per (machine, prefix), prints the summaries and
  optionally sends them to slack.
  """
  assert FLAGS.date, "Specify date"
  _flow_root = FLAGS.flow_root or flow_root
  _machines = FLAGS.machines or machines
  dataset = file_info_to_dataset(FLAGS.date, _flow_root, _machines)

  report_frames = []   # per-(machine, prefix) anomaly frames
  groupby_frames = []  # per-(machine, prefix) 30-minute size totals
  for machine in _machines:
    prefix_list = get_machine_config(machine, num_workders, config_root)
    for prefix in prefix_list:
      groupby_data = get_groupby_30min(machine, prefix, FLAGS.date, dataset)
      stats = check_individual_files(FLAGS.date, groupby_data, prefix)
      stats['machine'] = machine
      stats['prefix'] = prefix
      report_frames.append(stats)

      file_size_data = groupby_data['total'].reset_index()
      file_size_data['machine'] = machine
      file_size_data['prefix'] = prefix
      groupby_frames.append(file_size_data)

  # pd.concat replaces DataFrame.append, which was removed in pandas 2.0.
  individual_reports = (
      pd.concat(report_frames) if report_frames else pd.DataFrame())
  individual_reports = individual_reports.reset_index()

  groupby_table = (
      pd.concat(groupby_frames) if groupby_frames else pd.DataFrame())
  groupby_table = groupby_table.reset_index()
  # different machines subscribed channels are different
  # compare_reports = compare_machines(FLAGS.date, groupby_table)
  # compare_reports = compare_reports.reset_index()

  unfinished = check_unfinished_machine(FLAGS.date, groupby_table)

  has_msg = False
  message = "Feed file status " + FLAGS.date + '\n'
  if len(unfinished) > 0:
    sync_title = "\nSync up not finished machine -----------------------\n"
    print(sync_title)
    print(unfinished)
    message = message + sync_title + unfinished.to_string()
    has_msg = True

  # Enter the option context with `with` — the old code constructed it
  # without entering, so it never took effect — so prints show full frames.
  with pd.option_context('display.max_rows', None, 'display.max_columns', None):
    df_crash = summarise_crash(individual_reports)
    if len(df_crash) > 0:
      crash_title = "\nVanilla crash summary ---------------------------------\n"
      print(crash_title)
      print(df_crash)
      message = message + crash_title + df_crash.to_string()
      has_msg = True

    df_miss = summarise_miss(individual_reports)
    if len(df_miss) > 0:
      miss_title = "\nFeed file missing summary ---------------------------------\n"
      print(miss_title)
      print(df_miss)
      message = message + miss_title + df_miss.to_string()
      has_msg = True

  if has_msg and FLAGS.slack_send:
    send_to_slack(message, 'coin_feed_noti', 'msg')


if __name__ == '__main__':
  # Command-line flags (absl).  --date is required; the others fall back
  # to the module-level defaults in main().
  flags.DEFINE_string('date', None, 'yyyymmdd')
  flags.DEFINE_boolean('slack_send', False, 'send result to slack')
  flags.DEFINE_list('machines', None, "Separate by comma")
  flags.DEFINE_string('flow_root', None, "Flow root dir")

  app.run(main)
