# Copyright (c) 2018 Presto Labs Pte. Ltd.
# Author: jaewon

import datetime
import logging
from concurrent.futures import ThreadPoolExecutor

import kafka
from tornado.concurrent import run_on_executor

from coin.base.datetime_util import to_timestamp_int
from coin.base.param_util import to_list


class AsyncKafkaReader(object):
  """Awaitable reader over one or more Kafka topics.

  Blocking kafka-python consumer calls are dispatched to a one-worker
  ThreadPoolExecutor and exposed as awaitables via tornado's
  ``run_on_executor``, so callers can ``await`` open/read/close.
  """

  def __init__(self, topics, *, kafka_config, timestamp_from=None, logger=None):
    """Initializes the reader; no connection is made until open().

    Args:
      topics: A single topic name or a list of topic names.
      kafka_config: Config object exposing `kafka_servers` (bootstrap servers).
      timestamp_from: Optional start time; reading begins at the first message
        at/after this time. When None, reading starts at each partition's end.
      logger: Optional logger; defaults to the module logger.
    """
    self._logger = logger or logging.getLogger(__name__)
    self._topics = to_list(topics)
    self._timestamp_from = timestamp_from

    assert kafka_config is not None
    self._kafka_config = kafka_config

    self._kafka_consumer = None
    self._stop = False
    # Single worker: the (non-thread-safe) consumer is only ever touched from
    # one executor thread.
    self._executor = ThreadPoolExecutor(max_workers=1)

  def __del__(self):
    self._executor.shutdown(wait=True)

  async def open(self):
    """Creates the Kafka consumer and seeks to the configured start offsets."""
    assert self._kafka_consumer is None
    await self._init_kafka_consumer()

  async def close(self):
    """Stops any in-flight read and closes the consumer. Idempotent."""
    if self._kafka_consumer is None:
      return
    self._stop = True
    await self._close_kafka_consumer()

  @run_on_executor(executor='_executor')
  def read(self, timeout_ms=None):
    """Blocks (on the executor thread) until at least one message arrives.

    Args:
      timeout_ms: Overall deadline in milliseconds; None waits indefinitely.

    Returns:
      A flat list of fetched ConsumerRecords across all partitions.

    Raises:
      ConnectionError: If the consumer is not open, or close() was requested.
      TimeoutError: If no messages arrived within timeout_ms.
    """
    if timeout_ms is not None:
      deadline = (datetime.datetime.now() + datetime.timedelta(milliseconds=timeout_ms))
    else:
      deadline = datetime.datetime.max

    while datetime.datetime.now() < deadline:
      if self._kafka_consumer is None:
        raise ConnectionError()
      # Poll in short slices so a concurrent close() is noticed promptly.
      fetched = self._kafka_consumer.poll(timeout_ms=100)
      if self._stop:
        raise ConnectionError()
      if fetched:
        # poll() returns {TopicPartition: [records]}; flatten to a single list
        # (avoids the quadratic sum(..., []) list concatenation).
        return [record for records in fetched.values() for record in records]

    raise TimeoutError()

  def _query_partitions(self):
    """Returns all TopicPartitions for the configured topics.

    Raises:
      ValueError: If a topic has no partition metadata (e.g. does not exist);
        partitions_for_topic returns None in that case, which would otherwise
        surface as a confusing TypeError.
    """
    partitions = []
    for topic in self._topics:
      partition_ids = self._kafka_consumer.partitions_for_topic(topic)
      if not partition_ids:
        raise ValueError('No partitions found for topic %r' % topic)
      partitions += [kafka.TopicPartition(topic, partition_id) for partition_id in partition_ids]
    return partitions

  @run_on_executor(executor='_executor')
  def _init_kafka_consumer(self):
    """Creates the consumer, assigns partitions, and seeks start offsets.

    On failure the partially-initialized consumer is closed and the original
    exception is re-raised, so awaiting open() surfaces the error instead of
    leaving a silently broken reader.
    """
    self._kafka_consumer = kafka.KafkaConsumer(bootstrap_servers=self._kafka_config.kafka_servers,
                                               enable_auto_commit=False)

    try:
      partitions = self._query_partitions()
      if self._timestamp_from is None:
        offsets = {partition: None for partition in partitions}
      else:
        # to_timestamp_int presumably returns nanoseconds (TODO confirm);
        # offsets_for_times expects milliseconds.
        timestamp_ms = to_timestamp_int(self._timestamp_from) // 1000000  # ms
        timestamps = {partition: timestamp_ms for partition in partitions}
        offsets = self._kafka_consumer.offsets_for_times(timestamps)

      self._kafka_consumer.assign(partitions)
      for partition, offset in offsets.items():
        if offset:
          self._kafka_consumer.seek(partition, offset.offset)
        else:
          # No message at/after the timestamp (or no timestamp given):
          # start from the tail of the partition.
          self._kafka_consumer.seek_to_end(partition)

    except BaseException:
      self._logger.exception('Error occurred while initializing kafka consumer')
      consumer, self._kafka_consumer = self._kafka_consumer, None
      try:
        consumer.close(autocommit=False)
      except Exception:
        # Cleanup failure must not mask the original initialization error.
        self._logger.exception('Error occurred while closing kafka consumer')
      raise

  @run_on_executor(executor='_executor')
  def _close_kafka_consumer(self):
    """Closes and discards the consumer without committing offsets."""
    if self._kafka_consumer is not None:
      consumer, self._kafka_consumer = self._kafka_consumer, None
      consumer.close(autocommit=False)
