package com.qyer.commons.kafka;

import static com.google.common.base.Preconditions.checkArgument;
import static com.qyer.commons.utils.ReentrantExceptionOperator.buildOperationWithSleep;
import static org.apache.commons.lang3.StringUtils.isNotBlank;

import com.google.common.collect.Lists;
import com.qyer.commons.concurrent.GenericLinkedBlockingQueue;
import com.qyer.commons.utils.ReentrantExceptionOperator;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.io.InputStream;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;

/**
 * Kafka consumer that polls a single topic and delegates batch processing to an internal
 * {@link RecordsProcessor} running on a dedicated single-thread executor.
 *
 * <p>Before each poll, the current processing progress is (re)committed through a retrying
 * commit operator. If a batch is not fully processed within a short grace period, the
 * consumer pauses all assigned partitions (the heartbeat keeps running) and resumes them
 * once the processor signals completion.
 *
 * <p>Create instances via the nested {@link Builder}; stop a running consumer with
 * {@link #shutdown()}. The {@code exitSignal} latch is counted down after the consumer has
 * fully closed.
 *
 * <p>User: Z J Wu Date: 2016/07/22 Time: 14:53 Package: com.qyer.commons.kafka
 */
public class GenericKafkaConsumer<T extends SelfSerializableRecord<T>, R1, R2> extends
  BasicQueueOperationComponent<T, R1, R2> implements Runnable {

  /**
   * Fluent builder for {@link GenericKafkaConsumer}. Validates all required fields and
   * falls back to sane defaults for the commit-retry parameters.
   */
  public static class Builder<T extends SelfSerializableRecord<T>, R1, R2> {

    private String name;
    private String topic;
    private GenericLinkedBlockingQueue<T> outQueue;
    private CountDownLatch initSignal;
    private CountDownLatch exitSignal;
    private int recommitTimes;
    private long recommitInterval;
    private String confPath;

    private OperationHook<T, R1> beforeTakeHook;
    private OperationHook<T, R2> afterTakeHook;

    public Builder() {
    }

    /** Logical name of the consumer, used in log messages. Required. */
    public Builder<T, R1, R2> name(String name) {
      this.name = name;
      return this;
    }

    /** Kafka topic to subscribe to. Required. */
    public Builder<T, R1, R2> topic(String topic) {
      this.topic = topic;
      return this;
    }

    /** Downstream queue that successfully processed records are forwarded to. Required. */
    public Builder<T, R1, R2> outQueue(GenericLinkedBlockingQueue<T> outQueue) {
      this.outQueue = outQueue;
      return this;
    }

    /** Latch counted down once the consumer has received its initial partition assignment. Required. */
    public Builder<T, R1, R2> initSignal(CountDownLatch initSignal) {
      this.initSignal = initSignal;
      return this;
    }

    /** Latch counted down when the consumer has fully shut down. Required. */
    public Builder<T, R1, R2> exitSignal(CountDownLatch exitSignal) {
      this.exitSignal = exitSignal;
      return this;
    }

    /** Maximum number of offset-commit attempts; values &lt;= 0 default to 3. */
    public Builder<T, R1, R2> recommitTimes(int recommitTimes) {
      this.recommitTimes = recommitTimes;
      return this;
    }

    /** Sleep between commit attempts in milliseconds; values &lt;= 0 default to 300. */
    public Builder<T, R1, R2> recommitInterval(long recommitInterval) {
      this.recommitInterval = recommitInterval;
      return this;
    }

    /** Classpath location of the Kafka client {@link Properties} file. Required. */
    public Builder<T, R1, R2> config(String confPath) {
      this.confPath = confPath;
      return this;
    }

    /** Hook invoked before each {@code poll()} call. Optional. */
    public Builder<T, R1, R2> registerBeforeTakeHook(OperationHook<T, R1> beforeTakeHook) {
      this.beforeTakeHook = beforeTakeHook;
      return this;
    }

    /** Hook invoked after a batch has been committed and forwarded downstream. Optional. */
    public Builder<T, R1, R2> registerAfterTakeHook(OperationHook<T, R2> afterTakeHook) {
      this.afterTakeHook = afterTakeHook;
      return this;
    }

    /**
     * Validates the configuration and creates the consumer.
     *
     * @return a fully wired consumer, ready to be run on a thread
     * @throws IllegalArgumentException if a required field is missing or blank
     */
    public GenericKafkaConsumer<T, R1, R2> build() {
      checkArgument(isNotBlank(name), "Consumer name is blank.");
      checkArgument(isNotBlank(topic), "Consumer topic is blank.");
      checkArgument(isNotBlank(confPath), "Consumer configuration file path is blank.");
      checkArgument(outQueue != null, "Out queue is null.");
      checkArgument(initSignal != null, "Init signal is null.");
      checkArgument(exitSignal != null, "Exit signal is null.");
      // Defaults: retry commits up to 3 times, sleeping 300ms between attempts.
      if (recommitTimes <= 0) {
        recommitTimes = 3;
      }
      if (recommitInterval <= 0) {
        recommitInterval = 300;
      }
      GenericKafkaConsumer<T, R1, R2> consumer = new GenericKafkaConsumer<>(name, topic,
                                                                            recommitTimes,
                                                                            recommitInterval,
                                                                            confPath, outQueue,
                                                                            initSignal, exitSignal);
      // Reading from Kafka, committing and forwarding to another queue is a compound
      // operation: the "before" hook must fire before consumer.poll(), while the "after"
      // hook fires only after a successful commit AND after the processor has pushed the
      // records into the downstream outQueue.
      consumer.setBeforeOperationHook(beforeTakeHook);
      consumer.getProcessor().setAfterOperationHook(afterTakeHook);
      return consumer;
    }

  }

  private static final Logger LOGGER = LoggerFactory.getLogger(GenericKafkaConsumer.class);

  private final String name;
  private final String topic;
  private final Consumer<String, T> consumer;
  private final AtomicBoolean running;
  private final GenericLinkedBlockingQueue<T> outQueue;
  private final CountDownLatch initSignal;
  private final CountDownLatch exitSignal;
  private final PartitionAssignment assignment = new PartitionAssignment();

  // Commit-retry parameters: how many attempts, and how long to sleep between attempts.
  private final int recommitTimes;
  private final long recommitInterval;

  // Commit bookkeeping and processing control shared with the worker thread.
  private final CommitMap commitMap;
  private final AtomicBoolean innerDone;
  private final RecordsProcessor<T, R1, R2> processor;

  private GenericKafkaConsumer(String name, String topic, int recommitTimes, long recommitInterval,
                               String confPath, GenericLinkedBlockingQueue<T> queue,
                               CountDownLatch initSignal, CountDownLatch exitSignal) {
    this.name = name;
    this.outQueue = queue;
    this.topic = topic;
    Properties props = new Properties();
    // Load the Kafka client configuration from the classpath. Use try-with-resources so
    // the stream is always closed (the original leaked it), and fail fast with a clear
    // message instead of an opaque NPE when the resource does not exist.
    try (InputStream is = this.getClass().getClassLoader().getResourceAsStream(confPath)) {
      if (is == null) {
        throw new IllegalArgumentException("Kafka configuration not found on classpath: " + confPath);
      }
      props.load(is);
    } catch (IOException e) {
      throw new IllegalArgumentException("Failed to load Kafka configuration: " + confPath, e);
    }
    this.recommitTimes = recommitTimes;
    this.recommitInterval = recommitInterval;
    this.consumer = new KafkaConsumer<>(props);
    this.running = new AtomicBoolean(true);
    this.initSignal = initSignal;
    this.exitSignal = exitSignal;
    this.commitMap = new CommitMap();
    this.innerDone = new AtomicBoolean(true);
    this.processor = new RecordsProcessor<>(commitMap, innerDone, outQueue);
  }

  /** @return the processor that performs the actual per-batch work on the worker thread */
  public RecordsProcessor<T, R1, R2> getProcessor() {
    return processor;
  }

  /**
   * Requests shutdown from another thread: flips the running flag and wakes the consumer
   * out of a blocking {@code poll()} via {@link Consumer#wakeup()}.
   */
  public void shutdown() {
    running.set(false);
    consumer.wakeup();
  }

  @Override
  public void run() {
    LOGGER.info("Kafka consumer({}) started.", name);
    // Diamond instead of the original raw-type constructor call, which produced an
    // unchecked-assignment warning.
    final ReentrantKafkaCommit<T> rc = new ReentrantKafkaCommit<>(consumer);
    ReentrantExceptionOperator<Void> commitOperator = buildOperationWithSleep(recommitTimes, rc,
                                                                              TimeUnit.MILLISECONDS,
                                                                              recommitInterval);

    final List<String> topics = Lists.newArrayList(topic);
    // The rebalance listener is invoked on this thread by poll(), so it is thread-safe here.
    CommitWhenRebalanceListener l = new CommitWhenRebalanceListener(rc, commitOperator, assignment,
                                                                    initSignal);
    consumer.subscribe(topics, l);
    LOGGER.info("Kafka consumer successfully joined topic({}).", consumer.subscription());
    ExecutorService es = Executors.newSingleThreadExecutor();
    boolean paused = false;
    try {
      while (running.get()) {
        // The actual work is executed by another thread, so commit the progress accumulated
        // so far before issuing a new poll.
        rc.prepareCommit(commitMap.getCopy());
        try {
          commitOperator.tryOperate();
        } catch (Exception e) {
          LOGGER.error("Commit failed", e);
          continue;
        } finally {
          commitOperator.clear();
        }
        /*
         * Decide the flow from the processing state plus the pause flag. Two cases need a
         * pause/resume call:
         * 1. Processing finished while the consumer is paused -> resume consumption.
         * 2. Processing has not finished within the grace period -> pause all partitions so
         *    no new records arrive while the heartbeat keeps the group membership alive.
         */
        if (innerDone.get() && paused) {
          LOGGER.info("Processing finished, poll will be resumed.");
          consumer.resume(assignment.getAssignment());
          paused = false;
          continue;
        }

        /*
         * FIXME: 2016/06/21 handle the case where poll() keeps failing.
         * After a failure threshold this thread should throw and terminate, surface the
         * consumer state through monitoring, and ideally offer a manual restart.
         */
        ConsumerRecords<String, T> records = consumer.poll(1000);
        if (records == null || records.isEmpty()) {
          continue;
        }
        beforeOperation(null);
        processor.setRecords(records);
        processor.setTakeFromQueueTime(System.currentTimeMillis());
        Future<Integer> operationFuture = es.submit(processor);
        try {
          operationFuture.get(3, TimeUnit.SECONDS);
        } catch (TimeoutException e) {
          LOGGER.info("Time out when processing. Processing will continue, poll will be paused.");
          consumer.pause(assignment.getAssignment());
          paused = true;
        } catch (InterruptedException e) {
          // Restore the interrupt status so the surrounding machinery can observe it.
          Thread.currentThread().interrupt();
          LOGGER.warn("Interrupted while waiting for record processing.", e);
        } catch (Exception e) {
          LOGGER.warn("Operation failed.", e);
        }
      }
    } catch (WakeupException e) {
      // wakeup() is used both for shutdown() and to break a stuck poll; only an unexpected
      // wakeup (running still true) is worth a warning.
      if (running.get()) {
        LOGGER.warn("Consumer interrupted.");
      }
    } finally {
      consumer.close();
      List<Runnable> droppedTask = es.shutdownNow();
      LOGGER
        .info("Kafka consumer({}) stopped, {} task has been dropped.", name, droppedTask.size());
      exitSignal.countDown();
    }
  }

  /** @return the logical name of this consumer */
  public String getName() {
    return name;
  }

  /** @return the topic this consumer is subscribed to */
  public String getTopic() {
    return topic;
  }

  /** @return the maximum number of offset-commit attempts */
  public int getRecommitTimes() {
    return recommitTimes;
  }

  /** @return the sleep between commit attempts, in milliseconds */
  public long getRecommitInterval() {
    return recommitInterval;
  }
}
