import kafka
import collections
from typing import List, Optional

from coin.support.data_replay.reader.interface import (
    RawData,
    IRawDataReader)
from coin.util.queue.tools.kafka_archive import _calculate_read_range_offset
from coin.util.queue.config import KafkaConfig


class KafkaReader(IRawDataReader):
  """Streams raw Kafka records whose offsets fall inside a timestamp window.

  The [start, end] window is resolved to per-partition offset ranges once at
  construction time via ``_calculate_read_range_offset``; records are then
  polled and served in fetch order until every partition has reached its end
  offset. Note: ordering across partitions is NOT globally timestamp-sorted.
  """

  def __init__(self,
               topics: List[str],
               *,
               kafka_config: KafkaConfig,
               start_timestamp: int,
               end_timestamp: int):
    """
    Args:
      topics: Kafka topic names to read; every partition of each is consumed.
      kafka_config: connection settings (bootstrap servers).
      start_timestamp: window start, divided by 1e6 before the offset lookup
        (presumably nanoseconds -> milliseconds, matching Kafka's millisecond
        record timestamps — TODO confirm the caller's unit).
      end_timestamp: window end, same unit as start_timestamp.
    """
    self._topics = topics
    self._kafka_config = kafka_config
    self._record_cache = collections.deque()
    self._finished = False
    # Auto-commit disabled: this is a read-only replay, offsets are managed
    # explicitly via seek().
    self._kafka_consumer = kafka.KafkaConsumer(
        bootstrap_servers=self._kafka_config.kafka_servers,
        enable_auto_commit=False)
    # Down-convert to the millisecond resolution Kafka uses for
    # offsets_for_times-style lookups.
    start_timestamp = start_timestamp // 1000000
    end_timestamp = end_timestamp // 1000000
    self._partitions = self._query_partitions()
    self._end_partitions = set()
    self._read_from_offsets, self._read_to_offsets = _calculate_read_range_offset(
        self._kafka_consumer, self._partitions, start_timestamp, end_timestamp)
    self._init_kafka_consumer()

  def _query_partitions(self):
    """Return every TopicPartition of the configured topics.

    Raises:
      ValueError: if the cluster has no metadata for a topic. (Previously a
        missing topic crashed with an opaque TypeError when iterating None.)
    """
    partitions = []
    for topic in self._topics:
      partition_ids = self._kafka_consumer.partitions_for_topic(topic)
      if partition_ids is None:
        raise ValueError('no partitions found for topic: %s' % topic)
      partitions += [kafka.TopicPartition(topic, partition_id)
                     for partition_id in partition_ids]
    return partitions

  def _init_kafka_consumer(self):
    """Assign all partitions and seek each one to its window start offset."""
    self._kafka_consumer.assign(self._partitions)
    for partition, offset in self._read_from_offsets.items():
      if offset is None or offset > self._read_to_offsets[partition]:
        # Nothing readable in the window for this partition.
        self._end_partitions.add(partition)
      else:
        self._kafka_consumer.seek(partition, offset)

  def _update_record_cache(self):
    """Poll Kafka once and refill the (empty) record cache.

    Partitions that reach their last readable offset are added to
    ``_end_partitions``; once every partition has ended, ``_finished`` is set.
    Only called from ``_read_impl`` when the cache is empty, so replacing the
    cache wholesale never drops unconsumed records.
    """
    if len(self._end_partitions) >= len(self._partitions):
      self._finished = True
      return
    fetched = self._kafka_consumer.poll(timeout_ms=100)
    record_cache = []
    for partition, records in fetched.items():
      for record in records:
        # Keep records strictly before the end offset of the window.
        if record.offset + 1 <= self._read_to_offsets[partition]:
          record_cache.append(record)
        # Partition exhausted: discard the rest of this batch.
        if record.offset + 1 >= self._read_to_offsets[partition]:
          self._end_partitions.add(partition)
          break
    # Fix: this rebuild used to live inside the partition loop, recreating the
    # deque once per partition of the poll result for no benefit.
    self._record_cache = collections.deque(record_cache)

  def _read_impl(self, *, consume: bool) -> Optional[RawData]:
    """Return the next record (or None when the window is exhausted).

    Args:
      consume: if True pop the record; if False leave it cached (peek).
    """
    ret = None
    if not self._finished:
      # Poll until we either have data or learn that we are done.
      while len(self._record_cache) == 0:
        self._update_record_cache()
        if self._finished:
          break
      if len(self._record_cache) == 0:
        assert self._finished
      else:
        record = self._record_cache.popleft() if consume else self._record_cache[0]
        # record.timestamp is Kafka's millisecond timestamp; *1e6 converts to
        # nanoseconds — presumably the RawData unit, TODO confirm.
        ret = RawData(timestamp=int(record.timestamp * 1e6), value=record.value)
    return ret

  def read(self) -> Optional[RawData]:
    """Consume and return the next record, or None at end of window."""
    return self._read_impl(consume=True)

  def peek(self) -> Optional[RawData]:
    """Return the next record without consuming it, or None at end."""
    return self._read_impl(consume=False)


if __name__ == '__main__':
  # Ad-hoc smoke test: dump one hour of StrategyLog records from two topics
  # as a JSON array (one record per line) on stdout.
  import datetime
  import json
  import google.protobuf.json_format as json_format
  from coin.base.datetime_util import to_datetime, to_timestamp_int
  from coin.proto.coin_strategy_pb2 import StrategyLog

  kafka_config_file = '../../coin_deploy/support_monitor/config/kafka_aws_config.json'
  reader = KafkaReader(
      topics=[
        'STRAT_strategy-327.ap-northeast-1_vmm_sfty_delta_hedge',
        'STRAT_strategy-327.ap-northeast-1_vmm_cfx_delta_hedge'
      ],
      kafka_config=KafkaConfig.from_cmd_config(kafka_config_file),
      start_timestamp=to_timestamp_int(datetime.datetime(2023, 6, 11)),
      end_timestamp=to_timestamp_int(datetime.datetime(2023, 6, 11, 1))
  )
  datas = []
  # NOTE: records from different partitions are not globally timestamp-ordered,
  # so no monotonicity check is performed here.
  while True:
    record = reader.read()
    if record is None:
      break
    pb = StrategyLog()
    pb.ParseFromString(record.value)
    data = json_format.MessageToDict(pb, preserving_proto_field_name=True)
    curr_ts = record.timestamp
    hr_ts = to_datetime(curr_ts).strftime('%Y%m%d %H:%M:%S.%f')
    data['record_timestamp'] = curr_ts
    data['record_timestamp_human_readable'] = hr_ts
    datas.append(data)
  # Keep the one-record-per-line layout (diff-friendly for large dumps).
  records = '[\n' + ',\n'.join(json.dumps(record) for record in datas) + '\n]'
  print(records)
