package com.qyer.commons.kafka;

import com.qyer.commons.concurrent.GenericLinkedBlockingQueue;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.TopicPartition;

import java.util.Objects;
import java.util.concurrent.Callable;
import java.util.concurrent.atomic.AtomicBoolean;

/**
 * User: Z J Wu Date: 2016/07/22 Time: 14:55 Package: com.qyer.commons.kafka
 */
/**
 * Processes one batch of Kafka {@link ConsumerRecords}: for each record it stamps
 * timing/partition/offset metadata onto the record's {@link Summary}, hands the record to the
 * downstream {@code outQueue}, and registers the record's offset in {@code commitMap} so the
 * owning consumer can commit it.
 *
 * <p>Not thread-safe: {@code setRecords}/{@code setTakeFromQueueTime} must be called before each
 * {@code call()}, and the instance must not be shared between concurrent callers.
 *
 * <p>User: Z J Wu Date: 2016/07/22 Time: 14:55 Package: com.qyer.commons.kafka
 */
public class RecordsProcessor<T extends SelfSerializableRecord<T>, R1, R2> extends
  BasicQueueOperationComponent<T, R1, R2> implements Callable<Integer> {

  // The batch currently being processed; set via setRecords() before call(), nulled by clear().
  private ConsumerRecords<String, T> records;
  // Flipped to false while a batch is in flight, true once it finishes (see clear()).
  private final AtomicBoolean innerDone;
  // Downstream hand-off queue; put() may block and may throw InterruptedException.
  private final GenericLinkedBlockingQueue<T> outQueue;
  // Collects the latest offset per partition for the owning consumer to commit.
  private final CommitMap commitMap;
  // Millis timestamp at which the batch was taken from the source queue; stamped onto each Summary.
  private long takeFromQueueTime;

  /**
   * Creates a processor bound to the given coordination objects.
   *
   * @param commitMap collector of per-partition offsets to be committed; must not be null
   * @param innerDone completion flag shared with the owner; must not be null
   * @param outQueue  downstream queue records are handed to; must not be null
   * @throws NullPointerException if any argument is null
   */
  public RecordsProcessor(CommitMap commitMap, AtomicBoolean innerDone,
                          GenericLinkedBlockingQueue<T> outQueue) {
    // Fail fast here rather than NPE-ing later in the middle of call().
    this.commitMap = Objects.requireNonNull(commitMap, "commitMap");
    this.innerDone = Objects.requireNonNull(innerDone, "innerDone");
    this.outQueue = Objects.requireNonNull(outQueue, "outQueue");
  }

  /**
   * Processes the batch previously installed via {@link #setRecords}.
   *
   * @return the number of records iterated; note this also counts records whose value is
   *         {@code null} and was therefore skipped — TODO confirm that is intended
   * @throws Exception propagated from {@code outQueue.put} (e.g. InterruptedException) or the
   *                   before/after hooks; internal state is cleared in the finally block either way
   */
  @Override
  public Integer call() throws Exception {
    innerDone.set(false);
    int cnt = 0;
    try {
      for (TopicPartition partition : records.partitions()) {
        for (final ConsumerRecord<String, T> record : records.records(partition)) {
          ++cnt;
          long currentOffset = record.offset();
          /*
           * Optimistic assumption: to avoid duplicate consumption caused by commit failures, once
           * data is taken from Kafka we assume downstream processing will always succeed — so the
           * commit comes first and the actual processing afterwards. Because the out queue hands
           * records over one at a time, the offset is registered once per record per partition.
           * If the commit fails n times, the record is not processed at all.
           *
           * NOTE(review): the code below enqueues to outQueue BEFORE registering the offset in
           * commitMap, while the note above describes commit-first — confirm the intended order.
           */
          T t = record.value();
          if (t == null) {
            continue;
          }
          Summary summary = t.getSummary();
          summary.setDequeueTime(takeFromQueueTime);
          summary.setBeginProcessingTime(System.currentTimeMillis());
          summary.setPartition(partition.partition());
          summary.setOffset(currentOffset);
          beforeOperation(t);
          outQueue.put(t);
          commitMap.put(partition, currentOffset);
          afterOperation(t);
        }
      }
    } finally {
      // Always release the batch and signal completion, even if processing failed mid-batch.
      clear();
    }
    return cnt;
  }

  /** Sets the millis timestamp at which the current batch was dequeued from the source queue. */
  public void setTakeFromQueueTime(long takeFromQueueTime) {
    this.takeFromQueueTime = takeFromQueueTime;
  }

  /** Installs the next batch to process; must be called before each {@link #call()}. */
  public void setRecords(ConsumerRecords<String, T> records) {
    this.records = records;
  }

  /** Marks the batch done and drops references so the batch can be garbage-collected. */
  public void clear() {
    innerDone.set(true);
    takeFromQueueTime = -1;
    records = null;
  }
}
