import asyncio
import datetime
import logging
import os
import time
import signal
import sys
import typing
import traceback
from concurrent.futures import ThreadPoolExecutor

import pandas as pd
import pytz
from absl import app, flags
from google.protobuf.json_format import MessageToDict
from tornado.concurrent import run_on_executor
from tornado.ioloop import IOLoop, PeriodicCallback

import coin.base.database.util as db_util
from coin.base.datetime_util import (to_timestamp_int, to_datetime)
from coin.base.timestamp import get_timestamp
from coin.proto.coin_request_pb2 import (
    ApplicationInfoProto,
    ApplicationRequestProto,
    StrategyInfoProto)
from coin.proto.coin_strategy_pb2 import StrategyLog
from coin.support.alarm.logic.alarm_detector import (
    AlarmDetector,
    get_alarm_config)
from coin.support.alarm.logic.factory import alarm_ctor_info_map
from coin.tool.strat_monitor.util.monitor_util import (
    get_active_strategy,
    get_topics)
from coin.util.queue.config import KafkaConfig
from coin.util.queue.constants import TopicType
from coin.util.queue.reader import AsyncKafkaReader
from coin.util.queue.tools.kafka_archive import run_from_kafka_archive
from coin.util.queue.tools.kafka_topic import parse_kafka_topic

from xunkemgmt_client.support.alarm.database.memcached_importer import (
    MemcachedStrategyAlarmImporter)
from xunkemgmt_client.client.api_client import XunkemgmtClient
from xunkemgmt_client.client.util.query_util import (
    query_applications,
    query_strategies)
from xunkemgmt.service.alarm_service_pb2 import AlarmProto


# Module-level handle to the absl command-line flags; the actual flag
# definitions live in define_alarm_flags() below.
FLAGS = flags.FLAGS


class AlarmStreamProcessor(object):
  """Streams strategy logs from Kafka, runs alarm detection, and emits alarms.

  Lifecycle (driven by ``start``):
    1. ``_warm_up`` replays up to an hour of archived Kafka logs so each
       per-strategy ``AlarmDetector`` starts with recent state.
    2. ``_loop`` tails the live Kafka topics and feeds every record to the
       matching handler.
    3. Periodic callbacks refresh strategy/application metadata and dump the
       computed alarms (optionally to memcached and/or the log).
  """

  def __init__(self,
               *,
               ioloop: IOLoop,
               kafka_config: KafkaConfig,
               kafka_topic_type: TopicType,
               strategy_infos: typing.List[StrategyInfoProto],
               alarm_frequency: int,
               alarm_after_sec: typing.Optional[int]=None,
               exit_after_min: typing.Optional[int]=None,
               memcached_config: typing.Optional[typing.Dict]=None,
               print_result: bool=False):
    """Builds per-strategy alarm handlers and resolves the Kafka topics.

    Args:
      ioloop: Tornado IOLoop that all callbacks are scheduled on.
      kafka_config: Kafka connection configuration.
      kafka_topic_type: type of Kafka topics to subscribe to.
      strategy_infos: strategies to monitor (keyed internally by name).
      alarm_frequency: seconds between periodic alarm dumps.
      alarm_after_sec: delay in seconds before the first alarm dump
        (defaults to 0, i.e. dump as soon as possible).
      exit_after_min: stop the IOLoop after this many minutes
        (defaults to "never").
      memcached_config: if given, alarms are also written to memcached.
      print_result: if True, log a DataFrame of all alarms on each dump.
    """
    self._ioloop = ioloop
    self._kafka_config = kafka_config
    self._topic_type = kafka_topic_type
    self._strategy_infos = {elem.strategy_name: elem for elem in strategy_infos}
    self._strategy_names = list(self._strategy_infos.keys())
    self._alarm_frequency = alarm_frequency
    alarm_after_sec = 0 if alarm_after_sec is None else alarm_after_sec
    self._alarm_after_sec = datetime.timedelta(seconds=alarm_after_sec)
    self._exit_after = \
        datetime.timedelta.max if exit_after_min is None else \
        datetime.timedelta(minutes=exit_after_min)
    self._memcached_config = memcached_config
    # Created lazily by _gen_memcached_importer() on the executor thread.
    self._memcached_importer = None
    self._print_result = print_result
    self._topics = get_topics(
        self._strategy_names, kafka_config, topic_type=self._topic_type)
    # Warm-up replays archive data up to _warm_up_to_ts, retrying at most
    # _warm_up_times rounds to catch up with "now" (see _warm_up).
    self._warm_up_times = 3
    self._warm_up_to_ts = get_timestamp()
    self._handlers: typing.Dict[str, AlarmDetector] = {}
    # Backs the @run_on_executor methods so blocking queries don't stall
    # the IOLoop.
    self._executor = ThreadPoolExecutor(max_workers=8)

    # Best-effort fetch of application infos; up to 4 attempts (retry 0..3).
    application_infos = []
    retry = 0
    while retry <= 3:
      try:
        retry += 1
        with XunkemgmtClient() as client:
          application_infos = client.query_applications(ApplicationRequestProto())
        break
      except Exception:
        logging.error(traceback.format_exc())
        continue
    else:
      # Only reached when every attempt failed (no break); log at error
      # level consistently with the other failure paths.
      logging.error('Fail to query application info in initialization.')

    for strategy_name in self._strategy_infos.keys():
      strategy_info = self._strategy_infos[strategy_name]
      alarm_configs = {
          alarm_type: get_alarm_config(strategy_info, alarm_type)
          for alarm_type in alarm_ctor_info_map.keys()
      }
      if strategy_name not in self._handlers:
        self._handlers[strategy_name] = AlarmDetector(
            strategy_info=strategy_info,
            topics=self._topics,
            application_infos=application_infos,
            alarm_configs=alarm_configs,)

  async def _loop(self):
    """Tails the live Kafka topics forever, dispatching records to handlers.

    Exits the process outright if the reader cannot be opened. The read loop
    only terminates via an uncaught BaseException (e.g. task cancellation),
    in which case the reader is closed in the ``finally`` block before the
    exception propagates.
    """
    logging.info('Topics: %s', ', '.join(self._topics))
    try:
      reader = AsyncKafkaReader(self._topics,
                                kafka_config=self._kafka_config,
                                timestamp_from=self._warm_up_to_ts)
      await reader.open()
    except Exception:
      logging.error('Fail to open kafka reader.')
      logging.error(traceback.format_exc())
      os._exit(0)
    try:
      while True:
        msg = None
        try:
          records = await reader.read()
          for msg in records:
            topic = msg.topic
            _, strat_info = parse_kafka_topic(topic)
            pb = StrategyLog()
            pb.ParseFromString(msg.value)
            handler = self._handlers[strat_info.strategy_name]
            # NOTE(review): msg.timestamp * 1e6 suggests Kafka record
            # timestamps are in milliseconds being converted to
            # nanoseconds -- confirm against the reader's contract.
            handler.handle(int(msg.timestamp * 1e6), strat_info.machine, pb)
        except ConnectionError:
          # Transient; keep tailing.
          continue
        except Exception:
          if msg is not None:
            logging.info(msg)
          logging.error(traceback.format_exc())
          continue
    finally:
      # sleep(0.1) to prevent the Segmentation Fault caused by reader.close()
      time.sleep(0.1)
      await reader.close()

  def _warm_up(self):
    """Replays archived Kafka logs so handlers start with recent state.

    Starts one hour before ``_warm_up_to_ts`` and repeats (up to
    ``_warm_up_times`` rounds) until the replayed window is within 60s of
    "now". Exits the process if it cannot catch up in the allotted rounds.
    """
    warm_up_times = self._warm_up_times
    to_ts = self._warm_up_to_ts
    offset_nanosec = \
        int(datetime.timedelta(minutes=60).total_seconds() * 10**9)
    from_ts = self._warm_up_to_ts - offset_nanosec
    while warm_up_times > 0:
      logging.info('Warm up from %s to %s',
                   to_datetime(from_ts), to_datetime(to_ts))
      run_from_kafka_archive(topics=self._topics,
                             kafka_config=self._kafka_config,
                             timestamp_from=int(from_ts),
                             timestamp_to=int(to_ts),
                             callback=self.on_kafka_log)
      warm_up_times -= 1
      current_ts = get_timestamp()
      if current_ts - to_ts > 60 * 10**9:
        # Still more than 60s behind; replay the gap in the next round.
        self._warm_up_to_ts = current_ts
        from_ts = to_ts
        to_ts = self._warm_up_to_ts
      else:
        break
    else:
      # Only reached when every round left us behind (no break).
      logging.info('Warm up not finished in %s times', self._warm_up_times)
      sys.exit(1)
    logging.info('Warm up end')

  @run_on_executor(executor='_executor')
  def _query_strategies(self) -> typing.Dict[str, StrategyInfoProto]:
    """Fetches the monitored strategies, keyed by name (runs on executor)."""
    strategies = query_strategies(strategy_names=self._strategy_names, as_proto=True)
    return {elem.strategy_name: elem for elem in strategies}

  @run_on_executor(executor='_executor')
  def _query_applications(self) -> typing.List[ApplicationInfoProto]:
    """Fetches all application infos (runs on executor)."""
    return query_applications(as_proto=True)

  @run_on_executor(executor='_executor')
  def _query_topics(self) -> typing.List[str]:
    """Re-resolves the Kafka topics for the monitored strategies."""
    return get_topics(self._strategy_names, self._kafka_config, topic_type=self._topic_type)

  async def _query_meta(self):
    """Refreshes strategy/application/topic metadata on every handler."""
    strategies = await asyncio.wait_for(self._query_strategies(), 10)
    applications = await asyncio.wait_for(self._query_applications(), 10)
    topics = await asyncio.wait_for(self._query_topics(), 10)
    for strategy_name, handler in self._handlers.items():
      handler.update_strategy_info(strategies[strategy_name])
      handler.update_application_infos(applications)
      handler.update_topics(topics)

  def _gen_memcached_importer(self) -> typing.Optional[MemcachedStrategyAlarmImporter]:
    """Lazily builds (and caches) the memcached importer, if configured."""
    if not isinstance(self._memcached_importer, MemcachedStrategyAlarmImporter):
      if self._memcached_config is not None:
        self._memcached_importer = MemcachedStrategyAlarmImporter(self._memcached_config)
      else:
        self._memcached_importer = None
    return self._memcached_importer

  @run_on_executor(executor='_executor')
  def _insert_alarms(self, alarms: typing.List[AlarmProto]):
    """Writes the alarms to memcached, if configured (runs on executor).

    Note: _dump_result passes a list of AlarmProto, so the parameter is
    annotated accordingly (the original annotation said a single proto).
    """
    memcached_importer = self._gen_memcached_importer()
    if isinstance(memcached_importer, MemcachedStrategyAlarmImporter):
      memcached_importer.insert_strategy_alarms(alarms)

  async def _dump_result(self):
    """Collects one alarm per handler, optionally logs them, then stores them."""
    ts = get_timestamp()
    alarms = []
    for strategy_name, handler in self._handlers.items():
      try:
        alarms.append(handler.get_alarm(ts))
      except Exception:
        logging.error('Fail to get alarm of %s', strategy_name)
        logging.error(traceback.format_exc())
    if self._print_result:
      alarm_dicts = []
      for alarm in alarms:
        for alarm_value in alarm.alarm_values:
          alarm_dict = {
              'strategy_name': alarm.alarm_info.strategy_name,
              'machines': alarm.alarm_info.machines,
          }
          alarm_dict = {**alarm_dict,
                        **MessageToDict(alarm_value, preserving_proto_field_name=True)}
          # alarm_value.timestamp is in nanoseconds; render as naive UTC.
          alarm_dict['timestamp'] = datetime.datetime.utcfromtimestamp(
              alarm_value.timestamp / 10**9)
          alarm_dicts.append(alarm_dict)
      df = pd.DataFrame(alarm_dicts)
      logging.info('all alarms: \n%s', df.to_string())
    await asyncio.wait_for(self._insert_alarms(alarms), 1)

  def start(self):
    """Warms up, then schedules the tail loop and all periodic callbacks.

    Note: ``_warm_up`` blocks the caller until the archive replay finishes.
    """
    self._warm_up()
    self._ioloop.add_callback(self._loop)
    # Refresh metadata every 5 minutes.
    PeriodicCallback(self._query_meta, 300 * 1000).start()
    dump_result_func = PeriodicCallback(
        self._dump_result, self._alarm_frequency * 1000).start
    # After the initial delay: dump once immediately, then start the
    # periodic dumps (PeriodicCallback fires first after one full period).
    self._ioloop.add_timeout(self._alarm_after_sec, self._dump_result)
    self._ioloop.add_timeout(self._alarm_after_sec, dump_result_func)
    self._ioloop.add_timeout(self._exit_after, self._ioloop.stop)

  def on_kafka_log(self, partition, record):
    """Archive-replay callback: routes one record to its strategy handler."""
    _, strat_info = parse_kafka_topic(record.topic)
    pb = StrategyLog()
    pb.ParseFromString(record.value)
    self._handlers[strat_info.strategy_name].handle(
        int(record.timestamp * 1e6), strat_info.machine, pb)

  def signal_handler(self, sig, frame):
    """SIGTERM/SIGINT handler: log and exit immediately without cleanup."""
    logging.warning('Caught signal: %s', sig)
    os._exit(0)


def main(_):
  """Entry point: resolves strategies from flags, builds the processor, runs.

  Args:
    _: unused positional argv list supplied by absl's app.run.
  """
  ioloop = IOLoop.current()
  if FLAGS.business_unit is not None:
    business_units = [elem.strip() for elem in FLAGS.business_unit.split(',')]
  else:
    business_units = FLAGS.business_unit  # None -> no business-unit filter.
  if FLAGS.strategy_name is not None:
    strategy_names = [elem.strip() for elem in FLAGS.strategy_name.split(',')]
  else:
    # No explicit strategies: pick strategies active within the last day.
    # datetime.now(pytz.UTC) replaces the deprecated
    # utcnow().replace(tzinfo=pytz.UTC) and yields the same aware instant.
    threshold = to_timestamp_int(
        datetime.datetime.now(pytz.UTC) - datetime.timedelta(days=1))
    strategy_names = get_active_strategy(
        business_unit=business_units, update_timestamp_threshold=threshold)
  strategy_infos = query_strategies(strategy_names=strategy_names, as_proto=True)
  memcached_config = None
  if FLAGS.memcached_config is not None:
    memcached_config = db_util.read_db_config(FLAGS.memcached_config)

  processor = AlarmStreamProcessor(
      ioloop=ioloop,
      kafka_config=KafkaConfig.from_cmd_config(FLAGS.kafka_config_filename),
      kafka_topic_type=TopicType[FLAGS.kafka_topic_type],
      strategy_infos=strategy_infos,
      alarm_frequency=FLAGS.alarm_frequency,
      alarm_after_sec=FLAGS.alarm_after_sec,
      exit_after_min=FLAGS.exit_after_min,
      memcached_config=memcached_config,
      print_result=FLAGS.print_result)
  # Install handlers before start(): start() blocks in the warm-up phase,
  # and signals arriving during that phase should be handled too.
  signal.signal(signal.SIGTERM, processor.signal_handler)
  signal.signal(signal.SIGINT, processor.signal_handler)
  processor.start()

  try:
    ioloop.start()
  except (KeyboardInterrupt, SystemExit):
    return


def define_alarm_flags():
  """Registers the alarm processor's command-line flags.

  Returns:
    The list of flag names newly defined by this call (flags that did not
    already exist before it ran).
  """
  flags_before = set(flags.FLAGS.flag_values_dict())

  flags.DEFINE_string('kafka_config_filename',
                      '../../coin_deploy/support_monitor/config/kafka_aws_config.json',
                      'kafka config')
  flags.DEFINE_string('kafka_topic_type', 'STRAT', '')
  flags.DEFINE_string('strategy_name', None, 'comma separated strategy names')
  flags.DEFINE_string('business_unit', None, 'comma separated business_unit')
  flags.DEFINE_integer('alarm_frequency', 60, 'alarm frequency')
  flags.DEFINE_integer('alarm_after_sec', 60, 'alarm after sec')
  flags.DEFINE_boolean('print_result', False, 'Print result')
  flags.DEFINE_string('memcached_config', None, 'memcached config')
  flags.DEFINE_integer('exit_after_min', 240, 'exit after minute')

  return [name for name in flags.FLAGS.flag_values_dict()
          if name not in flags_before]


if __name__ == '__main__':
  # Configure the root logger first, then register flags: flags must exist
  # before app.run() parses argv and invokes main.
  logging.basicConfig(level='INFO', format='%(levelname)8s %(asctime)s %(message)s')
  define_alarm_flags()
  app.run(main)
