import concurrent.futures
import datetime
import functools
import logging
import traceback
import pandas
import tabulate
import typing

from absl import (app, flags)
from google.protobuf.json_format import MessageToDict

import coin.proto.coin_data_replay_pb2 as cdr_pb2
import xunkemgmt.service.alarm_service_pb2 as as_pb2
from coin.base.datetime_util import (
    convert_string_to_datetime,
    get_dt_ranges,
    to_timestamp_int)
from coin.base.param_util import to_list
from coin.proto.coin_request_pb2 import (
    ApplicationInfoProto,
    ApplicationRequestProto,
    StrategyInfoProto)
from coin.proto.coin_strategy_pb2 import StrategyLog
from coin.support.data_replay.driver import (
    DataReplayDriver,
    DataReplayHandler)
from coin.support.alarm.logic.alarm_detector import (
    AlarmDetector,
    get_alarm_config,)
from coin.support.alarm.logic.factory import alarm_ctor_info_map
from coin.support.proto_log.logic.log_info import StratInfo
from coin.support.pta.util.info_util import get_strat_info_list
from coin.tool.strat_monitor.util.monitor_util import (
    get_active_strategy,
    get_topics)
from coin.util.queue.config import KafkaConfig
from coin.util.queue.constants import TopicType
from xunkemgmt_client.client.api_client import XunkemgmtClient
from xunkemgmt_client.client.util.query_util import query_strategies


FLAGS = flags.FLAGS

WARMUP_OFFSET_TS = 60 * 60 * 10**9


def _dump_alarms(alarms: typing.List[as_pb2.AlarmProto], output_filepath: str):
  """Renders alarms as a text table and writes it to a file or stdout.

  Args:
    alarms: AlarmProto messages; each alarm contributes one row per entry in
      its alarm_values.
    output_filepath: Destination path; when None the table is printed instead.
  """
  alarm_dicts = []
  for alarm in alarms:
    for alarm_value in alarm.alarm_values:
      alarm_dict = {
          'strategy_name': alarm.alarm_info.strategy_name,
          **MessageToDict(alarm_value, preserving_proto_field_name=True),
      }
      # MessageToDict omits fields left at their default value, so 'timestamp'
      # may be missing; fall back to 0 (epoch) instead of raising KeyError.
      ts_sec = int(alarm_dict.get('timestamp', 0)) / 10**9
      # Timestamps are nanoseconds; render as UTC. Use an aware datetime since
      # datetime.utcfromtimestamp is deprecated (Python 3.12+).
      alarm_dict['time'] = datetime.datetime.fromtimestamp(
          ts_sec, tz=datetime.timezone.utc).strftime('%Y%m%dT%H%M%S')
      alarm_dicts.append(alarm_dict)
  df = pandas.DataFrame(alarm_dicts)
  if not df.empty:
    df = df.sort_values(['alarm_type', 'strategy_name', 'time']).reset_index(drop=True)
  alarm_str = tabulate.tabulate(df, headers="keys")
  if output_filepath is not None:
    with open(output_filepath, "w") as fp:
      fp.write(alarm_str)
  else:
    print(alarm_str)


class StrategyAlarmReplayer(DataReplayHandler):
  """Feeds replayed strategy logs into an AlarmDetector and collects alarms."""

  def __init__(self,
               *,
               strat_info: StratInfo,
               start_ts: int,
               topics: typing.List[str],
               application_infos: typing.List[ApplicationInfoProto]):
    self._start_ts = start_ts
    info_proto = StrategyInfoProto(
        strategy_name=strat_info.strategy_name, active=True)
    # Build one alarm config per known alarm type.
    configs = {}
    for alarm_type in alarm_ctor_info_map:
      configs[alarm_type] = get_alarm_config(info_proto, alarm_type)
    self._alarm_detector = AlarmDetector(
        strategy_info=info_proto,
        application_infos=application_infos,
        topics=topics,
        alarm_configs=configs,
        current_ts=start_ts,
        ignored_alarm_types=[as_pb2.TOPIC_NOT_FOUND_ALARM])
    self._alarms = []

  @property
  def alarms(self):
    """Alarms accumulated so far by on_timestamp."""
    return self._alarms

  def on_strat_log(self, topic_info: cdr_pb2.TopicDataInfo, ts: int, data: StrategyLog):
    """Forwards one replayed strategy-log record to the detector."""
    self._alarm_detector.handle(ts, topic_info.machine, data)

  def on_timestamp(self, ts: int):
    """Periodic tick: record the detector's alarm once past the warmup start."""
    if ts <= self._start_ts:
      return
    self._alarms.append(self._alarm_detector.get_alarm(ts))

class AlarmDumperRunner(object):
  """Replays alarms for a list of strategies over [start_ts, end_ts].

  Builds one StrategyAlarmReplayer per strategy and runs them either serially
  or in a process pool, collecting every produced alarm in self._alarms.
  """

  def __init__(self,
               *,
               strat_list: typing.List[str],
               kafka_config_filename: typing.Optional[str],
               strat_log_root_dir: typing.Optional[str],
               max_workers: int,
               start_ts: int,
               end_ts: int,
               alarm_frequency: int):
    """Resolves strategy infos, application infos and Kafka topics up front.

    NOTE: performs I/O at construction time (XunkemgmtClient query, and topic
    lookup when a kafka config is given).
    """
    self._kafka_config_filename = kafka_config_filename
    self._strat_log_root_dir = strat_log_root_dir
    self._max_workers = max_workers
    self._start_ts = start_ts  # nanoseconds, as is end_ts
    self._end_ts = end_ts
    self._alarm_frequency = alarm_frequency
    self._alarms = []  # filled by run()
    # Convert the ns timestamps to UTC dates for the strat-info lookup.
    start_date = datetime.datetime.utcfromtimestamp(self._start_ts / 10**9).date()
    end_date = datetime.datetime.utcfromtimestamp(self._end_ts / 10**9).date()
    kafka_config = None
    if kafka_config_filename is not None:
      kafka_config = KafkaConfig.from_cmd_config(kafka_config_filename)
    self._strat_infos = get_strat_info_list(
        start_date=start_date, end_date=end_date, strategy_list=strat_list,
        kafka_config=kafka_config, strat_log_root_dir=strat_log_root_dir)
    with XunkemgmtClient() as client:
      self._application_infos = client.query_applications(ApplicationRequestProto())
    # Topics can only be resolved when a kafka config is available.
    self._topics = []
    strategy_names = [info.strategy_name for info in self._strat_infos]
    if kafka_config is not None:
      self._topics = get_topics(
          strategy_names, kafka_config, topic_type=TopicType['STRAT'])

  def _replay_alarm(self, *, strat_info: StratInfo):
    """Replays one strategy's logs through the driver; returns its alarms."""
    alarm_replayer = StrategyAlarmReplayer(
        strat_info=strat_info,
        start_ts=self._start_ts,
        topics=self._topics,
        application_infos=self._application_infos)
    # Start replaying earlier so stateful detectors are warmed up; the
    # replayer itself only records alarms after self._start_ts.
    start_ts = self._start_ts - WARMUP_OFFSET_TS
    data_req = cdr_pb2.DataReplayRequest(
        topic_type=cdr_pb2.STRATEGY,
        root_dir=self._strat_log_root_dir,
        kafka_config_file=self._kafka_config_filename,
        strategy_names=to_list(strat_info.strategy_name),
        machines=to_list(strat_info.machine))
    driver_config = cdr_pb2.DataReplayDriverConfig(
        start_timestamp=start_ts,
        end_timestamp=self._end_ts,
        interval_sec=self._alarm_frequency,
        requests=to_list(data_req))
    driver = DataReplayDriver(driver_config, alarm_replayer)
    driver.run()
    return alarm_replayer.alarms

  @property
  def alarms(self):
    """Alarms collected by the most recent run()."""
    return self._alarms

  def _reset_alarms(self):
    # Clears results so repeated run() calls don't accumulate stale alarms.
    self._alarms = []

  def run(self) -> int:
    """Replays all strategies and stores their alarms.

    Returns:
      The number of strategies whose replay failed.
    """
    func_list = []
    for strat_info in self._strat_infos:
      replay_func = functools.partial(self._replay_alarm, strat_info=strat_info)
      func_list.append((strat_info, replay_func))
    num_func = len(func_list)
    self._reset_alarms()
    alarms = []
    success = 0
    max_workers = self._max_workers
    if max_workers > 1:
      with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor:
        fut_to_strat_info = {executor.submit(func): key for key, func in func_list}
        # NOTE(review): timeout=None blocks until every future completes, so
        # not_done_futs should always be empty here; the branch below looks
        # defensive / leftover from a timed variant — confirm before relying
        # on it.
        done_futs, not_done_futs = concurrent.futures.wait(fut_to_strat_info.keys(),
                                                           timeout=None)
        # Capture the bound Future.result methods; they are invoked in the
        # serial loop below so exceptions are handled uniformly in both the
        # pooled and serial paths.
        func_list = [(fut_to_strat_info[fut], fut.result) for fut in done_futs]
        for fut in not_done_futs:
          strat_info = fut_to_strat_info[fut]
          logging.error('Fail to dump strategy log due to timeout. %s',
                        strat_info._asdict())
        # HACK: reaches into the executor's private _processes to force worker
        # shutdown before the context manager exits — presumably to avoid a
        # hang on shutdown. Results are already captured above.
        for pid, process in executor._processes.items():
          process.terminate()

    # Serial path (and result collection for the pooled path).
    for key, func in func_list:
      try:
        alarms.extend(func())
        success += 1
      except Exception as e:
        logging.error('Fail to dump strategy log. %s, %s, %s', key._asdict(), type(e), e)
        logging.error(traceback.format_exc())
    logging.info('Success rate: %s/%s' % (success, num_func))
    num_fail = num_func - success
    self._alarms = alarms
    return num_fail


def main(_):
  """Entry point: replays alarms for each requested time range and dumps them."""
  assert FLAGS.start_time, '--start_time must be specified.'
  assert FLAGS.end_time, '--end_time must be specified.'
  start_dt = convert_string_to_datetime(FLAGS.start_time)
  end_dt = convert_string_to_datetime(FLAGS.end_time)
  dt_ranges = get_dt_ranges(start_dt, end_dt, FLAGS.aggregate)

  # Candidate strategies: explicit --strategy_name list, or everything that
  # was active at the start of the window.
  strategy_universe = query_strategies(as_proto=True)
  if FLAGS.strategy_name is not None:
    candidates = [name.strip() for name in FLAGS.strategy_name.split(',')]
  else:
    candidates = get_active_strategy(
        update_timestamp_threshold=to_timestamp_int(start_dt))
  # Narrow the universe by the optional business-unit / strategy-group filters.
  filtered = strategy_universe
  if FLAGS.business_unit is not None:
    allowed_units = {unit.strip() for unit in FLAGS.business_unit.split(',')}
    filtered = [strat for strat in filtered if strat.business_unit in allowed_units]
  if FLAGS.strategy_group is not None:
    allowed_groups = {group.strip() for group in FLAGS.strategy_group.split(',')}
    filtered = [strat for strat in filtered if strat.strategy_group in allowed_groups]
  strat_list = list(set(candidates).intersection(
      {strat.strategy_name for strat in filtered}))

  for range_start, range_end in dt_ranges:
    logging.info('\nRunning for %s-%s ...' % (
        range_start.strftime('%Y%m%dT%H%M%S'),
        range_end.strftime('%Y%m%dT%H%M%S')))
    runner = AlarmDumperRunner(
        strat_list=strat_list,
        kafka_config_filename=FLAGS.kafka_config_filename,
        strat_log_root_dir=FLAGS.root_dir,
        max_workers=FLAGS.max_workers,
        start_ts=to_timestamp_int(range_start),
        end_ts=to_timestamp_int(range_end),
        alarm_frequency=FLAGS.alarm_frequency)
    runner.run()
    _dump_alarms(runner.alarms, output_filepath=FLAGS.output_filepath)


if __name__ == '__main__':
  # Configure stdlib logging early. NOTE(review): absl's app.run() installs its
  # own logging handling and may override this configuration — confirm the
  # intended format actually survives.
  logging.basicConfig(level='INFO', format='%(levelname)8s %(asctime)s %(message)s')

  # Flags are registered under the __main__ guard, so importing this module
  # elsewhere does not define them.
  flags.DEFINE_string('start_time', None, 'Start date in form of %Y%m%d.')
  flags.DEFINE_string('end_time', None, 'End date in form of %Y%m%d.')
  flags.DEFINE_integer('max_workers', 24, 'Max number of workers.')
  flags.DEFINE_string('root_dir', None, '')
  flags.DEFINE_boolean('aggregate', False, '')
  flags.DEFINE_string('kafka_config_filename', None, 'kafka config')
  flags.DEFINE_string('strategy_name', None, 'comma separated strategy names')
  flags.DEFINE_string('strategy_group', None, 'filter by strategy_group')
  flags.DEFINE_string('business_unit', None, 'filter by business_unit')
  flags.DEFINE_string('output_filepath', None, 'output_filepath')
  # Interval (seconds) between alarm checks during replay.
  flags.DEFINE_integer('alarm_frequency', 1800, 'alarm frequency')
  app.run(main)
