# Copyright (c) 2019 Presto Labs Pte. Ltd.
# Author: xguo

import collections
import datetime
import functools
import json
import logging
import os
import time
import typing

from absl import app, flags
import numpy as np
import pytz
import google.protobuf.json_format as json_format
from cachetools import TTLCache

from coin.base.datetime_util import to_datetime
from coin.base.param_util import to_list
import coin.proto.coin_telemetry_pb2
from coin.proto.coin_request_pb2 import (
    CalculatorInfoProto,
    ExchangeApiRequestProto,
    StrategyRequestProto,
    ExchangeApiInfoProto)
from coin.proto.coin_strategy_pb2 import StrategyLog
from coin.proto.coin_telemetry_pb2 import (LatencyMarkPoint,
                                           LatencyStatsProto,
                                           StrategyLatencyStatsProto,
                                           StrategyComponentLatencyStatsProto,
                                           TelemetryProto)
from coin.support.accounting.logic.query_util import ExchangeApiRequest
from coin.base.git_util import fetch_git_commit_sha_datetime
from coin.support.proto_log.logic.util import run_from_strat_log_archive
from xunkemgmt_client.client.util.query_util import get_exchange_api_id_from_request


FLAGS = flags.FLAGS

# Key under which one measured latency interval is tracked: the logical
# latency name (e.g. 'BOOK_PRICER'), the ExchangeApiRequest it belongs to
# (empty fields for feed-side latencies), and the tag copied from the
# LatencyMarkPoint.
LatencyIntervalKey = collections.namedtuple(
    'LatencyIntervalKey',
    ['latency_type', 'exchange_api_request', 'latency_tag'])


@functools.lru_cache(maxsize=4096)
def _get_latency_type_list(point_name: str):
  name_map = {
      'ET_FT_DIFF': [('RAW_FEED_EXCHANGE_PUBLISHED', 'RAW_FEED_RECEIVED')],
      'BOOK_ET_FT_DIFF': [('RAW_BOOK_FEED_EXCHANGE_PUBLISHED', 'RAW_FEED_RECEIVED')],
      'TRADE_ET_FT_DIFF': [('RAW_TRADE_FEED_EXCHANGE_PUBLISHED', 'RAW_FEED_RECEIVED')],
      'FEED_READING': [('RAW_FEED_READ_STARTED', 'RAW_FEED_RECEIVED')],
      'FEED_PENDING': [('RAW_FEED_RECEIVED', 'RAW_FEED_PARSE_STARTED')],
      'FEED_PENDING_NUMBER': [('RAW_FEED_PENDING_NUMBER', 'RAW_FEED_PENDING_NUMBER')],
      'FEED_RECEIVED_NUMBER': [('RAW_FEED_RECEIVED_NUMBER', 'RAW_FEED_RECEIVED_NUMBER')],
      'FEED_PRE_READ_AVAILABLE': [('RAW_FEED_PRE_READ_AVAILABLE',
                                   'RAW_FEED_PRE_READ_AVAILABLE')],
      'BOOK_FEED_PARSER': [('RAW_FEED_PARSE_STARTED', 'PARSED_BOOK_FEED_PUBLISHED')],
      'TRADE_FEED_PARSER': [('RAW_FEED_PARSE_STARTED', 'PARSED_TRADE_FEED_PUBLISHED')],
      'PARSED_FEED_PENDING': [('PARSED_BOOK_FEED_PUBLISHED', 'ON_BOOK_FEED'),
                              ('PARSED_TRADE_FEED_PUBLISHED', 'ON_TRADE_FEED')],
      'PARSED_FEED_PENDING_NUMBER': [
          ('PARSED_FEED_PENDING_NUMBER', 'PARSED_FEED_PENDING_NUMBER')],
      'PARSED_FEED_PUBLISHED_NUMBER': [
          ('PARSED_FEED_PUBLISHED_NUMBER', 'PARSED_FEED_PUBLISHED_NUMBER')],
      'BOOK_PRICER': [('ON_BOOK_FEED', 'BEFORE_MANAGE_MM_ORDERS'),
                      ('ON_BOOK_FEED', 'BEFORE_MANAGE_AGG_ORDERS')],
      'TRADE_PRICER': [('ON_TRADE_FEED', 'BEFORE_MANAGE_MM_ORDERS'),
                       ('ON_TRADE_FEED', 'BEFORE_MANAGE_AGG_ORDERS')],
      'OG_ORDER_SUBMIT': [('BEFORE_OG_ORDER_SUBMIT', 'AFTER_OG_ORDER_SUBMIT')],
      'OG_ORDER_CANCEL': [('BEFORE_OG_ORDER_CANCEL', 'AFTER_OG_ORDER_CANCEL')],
      'OG_REST_TIMEOUT_ERROR_NUMBER': [
          ('OG_REST_TIMEOUT_ERROR', 'OG_REST_TIMEOUT_ERROR')],
  }
  key_set = set()
  for key, value in name_map.items():
    for pair in value:
      if point_name == pair[0]:
        key_set.add((key, 0))
      elif point_name == pair[1]:
        key_set.add((key, 1))
  return list(key_set)


def _get_interval_key(latency_type: str,
                      latency_mark_point: LatencyMarkPoint) -> LatencyIntervalKey:
  """Builds the LatencyIntervalKey under which |latency_mark_point| is tracked."""
  point = latency_mark_point
  if point.HasField('account_request'):
    assert point.account_request.HasField('market_type')
    assert point.account_request.HasField('exchange')
  # Feed-side latency types are not tied to a specific exchange API account,
  # so their key carries an empty ExchangeApiRequest.
  feed_side_types = ('BOOK_PRICER', 'TRADE_PRICER', 'FEED_READING', 'FEED_PENDING',
                     'FEED_PRE_READ_AVAILABLE', 'FEED_PENDING_NUMBER',
                     'FEED_RECEIVED_NUMBER', 'PARSED_FEED_PENDING_NUMBER',
                     'PARSED_FEED_PUBLISHED_NUMBER')
  if latency_type in feed_side_types:
    request_dict = {}
  else:
    request_dict = json_format.MessageToDict(point.account_request,
                                             preserving_proto_field_name=True)
  api_request = ExchangeApiRequest(
      market_type=request_dict.get('market_type'),
      exchange=request_dict.get('exchange'),
      api_version=request_dict.get('api_version'))
  return LatencyIntervalKey(latency_type=latency_type,
                            exchange_api_request=api_request,
                            latency_tag=point.tag)


def convert_to_interval(mark_points: typing.List[LatencyMarkPoint]
    ) -> typing.Dict[LatencyIntervalKey, typing.Union[int, float]]:
  """Pairs up start/end mark points into per-key interval values.

  For timestamp-based latency types the value is end.timestamp -
  start.timestamp.  For the *_NUMBER / PRE_READ types the |timestamp| field
  actually carries a count, which is passed through unchanged.

  Args:
    mark_points: latency mark points from one telemetry message.

  Returns:
    Dict from LatencyIntervalKey to the interval (or count) value.  Keys that
    only ever saw a start point are dropped.
  """
  interval_dict: typing.Dict[LatencyIntervalKey,
                             typing.Union[LatencyMarkPoint, int, float]] = {}
  non_timestamp_latency_types = [
      'FEED_PENDING_NUMBER', 'FEED_RECEIVED_NUMBER', 'FEED_PRE_READ_AVAILABLE',
      'PARSED_FEED_PENDING_NUMBER', 'PARSED_FEED_PUBLISHED_NUMBER',
  ]
  for point in mark_points:
    point_name = coin.proto.coin_telemetry_pb2.LifeOfSignal.Name(point.point_name)
    for latency_type, pos in _get_latency_type_list(point_name):
      key = _get_interval_key(latency_type, point)
      if key.latency_type == 'OG_REST_TIMEOUT_ERROR_NUMBER':
        # any number is fine, because this value is not used in stats
        interval_dict[key] = 1
        continue
      # here |timestamp| field doesn't mean timestamp but PENDING/RECEIVED/
      # PRE_READ number
      if key.latency_type in non_timestamp_latency_types:
        if point.HasField('timestamp'):
          interval_dict[key] = point.timestamp
        continue
      if point.timestamp == 0:
        continue
      existing = interval_dict.get(key)
      if isinstance(existing, (int, float)):
        # An interval was already computed for this key; keep the first one.
        # (Previously the numeric result was treated as a start point and
        # |.timestamp| on it raised AttributeError.)
        continue
      if existing is not None:
        interval_dict[key] = point.timestamp - existing.timestamp
      elif pos == 0:
        interval_dict[key] = point

  # Drop keys for which only the start point was observed.
  return {key: value for key, value in interval_dict.items()
          if isinstance(value, (int, float))}


def _get_stats(data: typing.List[typing.Union[int, float]]) -> LatencyStatsProto:
  """Summarizes the non-empty sample |data| into a LatencyStatsProto."""
  arr = np.asarray(data)
  percentile_protos = [
      LatencyStatsProto.PercentileStatsProto(
          percentile=p, value=int(np.percentile(arr, p)))
      for p in (10, 25, 50, 75, 90)
  ]
  return LatencyStatsProto(
      count=len(data),
      max=int(arr.max()),
      min=int(arr.min()),
      std=int(arr.std()),
      median=int(np.median(arr)),
      percentiles=percentile_protos,
  )


class LatencyStatsCalculator(object):
  """Aggregates strategy telemetry mark points into latency statistics.

  TELEMETRY StrategyLog records are fed in via on_log / update_by_strat_log
  and held in a TTL cache that is clocked by record time (not wall time), so
  archived logs can be replayed; get_latency_stats summarizes whatever is
  currently inside the |time_window_sec| window.
  """

  def __init__(self,
               strategy_request: typing.Optional[StrategyRequestProto]=None,
               time_window_sec: int=600,
               store_raw_data: bool=False):
    """Args:
      strategy_request: describes the strategy; only strategy_name is read
        here (for log messages).  Defaults to None (backward-compatible) so
        callers that construct the calculator without a request still work.
      time_window_sec: length of the sliding stats window, in seconds.
      store_raw_data: when True, also keep every telemetry log as a json dict
        (exposed via the raw_data property).
    """
    self._strategy_request = strategy_request
    # Nanosecond timestamp of the most recent record fed in; drives the TTL
    # cache clock through get_record_time().
    self._record_timestamp: typing.Optional[int] = None
    self._time_window_sec = time_window_sec
    self._cache = TTLCache(
        maxsize=4096 * 8, ttl=time_window_sec, timer=self.get_record_time)
    self._store_raw_data = store_raw_data
    self._raw_data = []
    git_commit_datetime, git_commit_sha = fetch_git_commit_sha_datetime('HEAD')
    self._git_commit_sha = git_commit_sha
    self._git_commit_datetime = \
        int(git_commit_datetime.replace(tzinfo=pytz.UTC).timestamp() * 10**9)
    self._exchange_api_universe: typing.Optional[typing.List[ExchangeApiInfoProto]] = \
        None

  @property
  def raw_data(self):
    # Json dicts of all telemetry logs seen (only populated when
    # store_raw_data=True).
    return self._raw_data

  def get_latency_stats(self,
                        expire: bool=True,
                        expire_timestamp: typing.Optional[int]=None
      ) -> StrategyLatencyStatsProto:
    """Computes latency stats over the records currently in the window.

    Args:
      expire: when True, first drop cache entries older than the window.
      expire_timestamp: optional ns timestamp to expire against; defaults to
        wall time.  Ignored when expire is False.

    Returns:
      StrategyLatencyStatsProto with one each_stats entry per
      (latency_type, exchange_api_request) pair seen in the window.
    """
    expire_time = None
    if expire:
      expire_time = expire_timestamp / 10**9 if expire_timestamp is not None \
                    else time.time()
      self._cache.expire(time=expire_time)
    # Pick a window end for the log messages even when not expiring; the
    # previous code passed None to utcfromtimestamp in that case and raised
    # TypeError.
    if expire_time is not None:
      end_time = expire_time
    elif self._record_timestamp is not None:
      end_time = self._record_timestamp / 1e9
    else:
      end_time = time.time()
    end_dt = datetime.datetime.utcfromtimestamp(end_time)
    start_dt = end_dt - datetime.timedelta(seconds=self._time_window_sec)

    all_data: typing.DefaultDict[LatencyIntervalKey,
                                 typing.List[typing.Union[int, float]]] = \
        collections.defaultdict(list)
    strategy_name = self._strategy_request.strategy_name \
        if self._strategy_request is not None else ''
    prev_pid = None
    for elem in self._cache.values():
      pid = None
      if elem.process.HasField('pid'):
        pid = elem.process.pid
      latency_dict = convert_to_interval(elem.telemetry.latency.mark_points)
      for (latency_type, exchange_api_request, _), value in latency_dict.items():
        if latency_type == 'OG_REST_TIMEOUT_ERROR_NUMBER':
          # Only the occurrence count matters; the interval value itself is a
          # placeholder.
          if len(all_data[(latency_type, exchange_api_request)]) == 0:
            all_data[(latency_type, exchange_api_request)].append(0)
          all_data[(latency_type, exchange_api_request)][0] += 1
        else:
          # FEED_RECEIVED_NUMBER/PARSED_FEED_PUBLISHED_NUMBER decreases when
          # pid changed: a decrease within the same pid is treated as an
          # anomaly and skipped; a decrease across pids (or with unknown pid)
          # restarts the series.
          if latency_type in ('FEED_RECEIVED_NUMBER', 'PARSED_FEED_PUBLISHED_NUMBER'):
            existing_values = all_data[(latency_type, exchange_api_request)]
            prev_value = existing_values[-1] if len(existing_values) > 0 else None
            if prev_value is not None and value < prev_value:
              if pid is not None and prev_pid is not None and pid == prev_pid:
                continue
              logging.info(
                  'clear %s, %s between %s and %s due to decreasing number %s vs %s' % (
                      strategy_name, latency_type, start_dt, end_dt, prev_value, value))
              existing_values.clear()
          all_data[(latency_type, exchange_api_request)].append(value)
      prev_pid = pid

    each_stats = []
    for (latency_type, exchange_api_request), value in all_data.items():
      if len(value) == 0:
        logging.info('no telemetry data for %s, %s between %s and %s' % (
            strategy_name, latency_type, start_dt, end_dt
        ))
        continue
      try:
        exchange_api_req_proto = ExchangeApiRequestProto(
            market_type=exchange_api_request.market_type,
            exchange=exchange_api_request.exchange,
            api_version=exchange_api_request.api_version)
        exchange_api_id = get_exchange_api_id_from_request(
            to_list(exchange_api_req_proto),
            self._exchange_api_universe)[0]
      except Exception:
        # Best effort: stats remain useful without a resolved api id.
        logging.info('Fail to get exchange_api_id: %s' % exchange_api_request)
        exchange_api_id = None
      latency_stats = _get_stats(value)
      component_stats = StrategyComponentLatencyStatsProto(
          latency_type=latency_type,
          exchange_api_id=exchange_api_id,
          latency_stats=latency_stats)
      each_stats.append(component_stats)
    stat_ts = int(expire_time * 10**9) if expire_time is not None \
              else self._record_timestamp
    calculator = CalculatorInfoProto(calculator_name=os.path.basename(__file__),
                                     git_commit_sha=self._git_commit_sha,
                                     git_commit_timestamp=self._git_commit_datetime)
    res = StrategyLatencyStatsProto(calculator=calculator,
                                    latency_period=self._time_window_sec,
                                    stat_ts=stat_ts,
                                    each_stats=each_stats)
    return res

  def get_record_time(self) -> float:
    """Latest record time in seconds; serves as the TTL cache clock."""
    return self._record_timestamp / 1e9

  def update_exchange_api_universe(
      self, exchange_api_universe: typing.Optional[typing.List[ExchangeApiInfoProto]]):
    """Sets the universe used to resolve exchange_api_id in the stats."""
    self._exchange_api_universe = exchange_api_universe

  def update_by_telemetry(self, timestamp: int, pb: StrategyLog):
    """Records one telemetry StrategyLog keyed by its ns |timestamp|."""
    self._record_timestamp = timestamp
    self._cache[timestamp] = pb

  def update_by_strat_log(self, timestamp: int, pb: StrategyLog):
    """Routes a parsed StrategyLog; only TELEMETRY records are kept."""
    assert isinstance(pb, StrategyLog)
    if pb.type == StrategyLog.TELEMETRY:
      self.update_by_telemetry(timestamp, pb)
      if self._store_raw_data:
        data = json_format.MessageToDict(pb, preserving_proto_field_name=True)
        self._raw_data.append(data)

  def on_log(self, timestamp: int, log: bytes):
    """Log-archive callback: parses the serialized StrategyLog and records it."""
    pb = StrategyLog()
    pb.ParseFromString(log)
    self.update_by_strat_log(timestamp, pb)


def launch(root_dir: str,
           trading_date: datetime.datetime,
           machine: str,
           strategy_name: str,
           on_log: typing.Callable[[int, bytes], None]):
  """Replays one full UTC day of archived strategy logs through |on_log|.

  |trading_date| marks UTC hour 0 of the day to replay.
  """
  window_start = trading_date
  window_end = window_start + datetime.timedelta(hours=24)
  run_from_strat_log_archive(on_log_callback=on_log,
                             start_time=window_start,
                             end_time=window_end,
                             root_dir=root_dir,
                             machine=machine,
                             strategy_name=strategy_name)
  return 0


def main(_):
  """Script entry point: replays one day of strategy logs, optionally printing
  the raw telemetry and the computed latency stats as json."""
  root_dir = os.path.expanduser(FLAGS.root_dir)
  trading_date = FLAGS.trading_date
  machine = FLAGS.machine
  strategy_name = FLAGS.strategy_name
  assert trading_date, '--trading_date must be specified.'
  assert machine, '--machine must be specified.'
  assert strategy_name, '--strategy_name must be specified.'
  print('Running for %s %s ...' % (trading_date, machine))

  # LatencyStatsCalculator takes the strategy request as its first argument;
  # the previous code omitted it entirely and raised TypeError on startup.
  strategy_request = StrategyRequestProto(strategy_name=strategy_name)
  onlog = LatencyStatsCalculator(strategy_request,
                                 store_raw_data=FLAGS.print_json)

  trading_date = datetime.datetime.strptime(trading_date, '%Y%m%d')
  launch(root_dir, trading_date, machine, strategy_name, onlog.on_log)
  if FLAGS.print_json:
    print(json.dumps(onlog.raw_data, indent=2))
    stats = json_format.MessageToDict(onlog.get_latency_stats(expire=False),
                                      preserving_proto_field_name=True)
    print(json.dumps(stats, indent=2))


if __name__ == '__main__':
  # Flags are registered under the __main__ guard so that importing this
  # module for its calculator classes does not define CLI flags.
  flags.DEFINE_string('root_dir', '~/data/strat_proto_log', 'root_dir.')

  flags.DEFINE_string('trading_date', None, 'Trading date in form of %Y%m%d.')

  flags.DEFINE_string('machine', None, 'Instance machine name.')

  flags.DEFINE_string('strategy_name', None, 'Strategy name.')

  flags.DEFINE_bool('print_json', False, 'Print json.')

  logging.basicConfig(level='DEBUG', format='%(levelname)8s %(asctime)s %(name)s] %(message)s')
  app.run(main)
