import kafka

from coin.base.datetime_util import to_timestamp_int


def _to_timestamp_ms_or_none(ts):
  """Convert *ts* to an integer epoch-milliseconds value; pass None through.

  NOTE(review): assumes to_timestamp_int returns nanoseconds, so floor
  division by 1_000_000 yields milliseconds — confirm against its definition.
  """
  if ts is None:
    return None
  return to_timestamp_int(ts) // 1000000


def _get_offset_from_timestamp(consumer, partitions, timestamp):
  assert timestamp is not None
  timestamps = {partition: timestamp for partition in partitions}
  offsets = consumer.offsets_for_times(timestamps)
  offsets = {
      partition: (offset.offset if offset is not None else None) for partition,
      offset in offsets.items()
  }
  return offsets


def _calculate_read_range_offset(consumer, partitions, timestamp_from, timestamp_to):
  begin_offsets = consumer.beginning_offsets(partitions)
  end_offsets = consumer.end_offsets(partitions)

  if timestamp_from is None:
    ts_from_offsets = {partition: begin_offsets[partition] for partition in partitions}
  else:
    ts_from_offsets = \
        _get_offset_from_timestamp(consumer, partitions, timestamp_from)

  if timestamp_to is None:
    ts_to_offsets = {partition: end_offsets[partition] for partition in partitions}
  else:
    ts_to_offsets = \
        _get_offset_from_timestamp(consumer, partitions, timestamp_to)

  read_from_offsets = {}
  read_to_offsets = {}
  for partition in partitions:
    ts_from_offset = ts_from_offsets[partition]
    ts_to_offset = ts_to_offsets[partition]
    begin_offset = begin_offsets[partition]
    end_offset = end_offsets[partition]

    read_from_offsets[partition] = \
        max(ts_from_offset, begin_offset) if ts_from_offset is not None else None

    read_to_offsets[partition] = \
        min(end_offset, ts_to_offset) if ts_to_offset is not None else end_offset

  return read_from_offsets, read_to_offsets


def run_from_kafka_archive(*, topics, kafka_config, timestamp_from, timestamp_to, callback):
  """Replay archived Kafka messages between two timestamps through *callback*.

  For every partition of *topics*, reads messages whose offsets fall in the
  half-open range resolved from *timestamp_from* / *timestamp_to* (either may
  be None for "beginning" / "end") and invokes ``callback(partition, record)``
  for each. Returns when every partition has been drained up to its end
  offset.
  """
  timestamp_from = _to_timestamp_ms_or_none(timestamp_from)
  timestamp_to = _to_timestamp_ms_or_none(timestamp_to)

  consumer = kafka.KafkaConsumer(bootstrap_servers=kafka_config.kafka_servers,
                                 api_version_auto_timeout_ms=300000,
                                 enable_auto_commit=False)
  try:
    partitions = []
    for topic in topics:
      partition_ids = consumer.partitions_for_topic(topic)
      # partitions_for_topic returns None for unknown topics; fail clearly
      # instead of with an opaque TypeError on iteration.
      if partition_ids is None:
        raise ValueError('no partition metadata for topic: %s' % topic)
      partitions += [kafka.TopicPartition(topic, partition_id)
                     for partition_id in partition_ids]

    read_from_offsets, read_to_offsets = _calculate_read_range_offset(
        consumer, partitions, timestamp_from, timestamp_to)

    end_partitions = set()
    consumer.assign(partitions)
    for partition, offset in read_from_offsets.items():
      # '>=' (was '>'): read_to is an exclusive bound (see the
      # record.offset + 1 checks below), so an exactly-empty range
      # (offset == read_to) has nothing to read; with '>' the poll loop
      # below would wait forever for a message that never arrives.
      if offset is None or offset >= read_to_offsets[partition]:
        end_partitions.add(partition)
        consumer.pause(partition)  # stop fetching data we would discard
      else:
        consumer.seek(partition, offset)

    while len(end_partitions) < len(partitions):
      # Block up to 1s per poll; the previous bare poll() used kafka-python's
      # default timeout_ms=0, which busy-spins the CPU.
      fetched = consumer.poll(timeout_ms=1000)
      for partition, records in fetched.items():
        for record in records:
          if record.offset + 1 <= read_to_offsets[partition]:
            callback(partition, record)
          if record.offset + 1 >= read_to_offsets[partition]:
            end_partitions.add(partition)
            consumer.pause(partition)
            break
  finally:
    # Always release sockets/buffers, even if the callback raises.
    consumer.close()