package com.juneyao.log.logcenter.kafka;

import com.juneyao.log.logcenter.elasticsearch.ElasticStorageService;
import com.juneyao.log.logcenter.elasticsearch.async.ElasticStorageAsync;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.TopicPartition;

import java.util.List;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

/**
 * @author quanbiao
 * @date 2018-07-26
 */
/**
 * Kafka listener that consumes operate-log messages from four partitions of the
 * {@code kafka.topic.operate} topic and hands each batch to a shared thread pool
 * for asynchronous storage into Elasticsearch.
 *
 * <p>NOTE(review): this class uses {@code @Value}/{@code @Autowired} but carries no
 * stereotype annotation ({@code @Component}) — presumably it is registered as a bean
 * elsewhere (e.g. via {@code @Bean}); verify before adding one.
 *
 * @author quanbiao
 * @date 2018-07-26
 */
public class LogOperateListener {

    /**
     * Prefix of the Elasticsearch index name for system operate logs; one index is
     * created per day, with the date appended as the index-name suffix.
     */
    @Value("${elasticsearch.operate.index.name.prefix}")
    private String prefixIndexName;

    /** Service that persists the consumed records into Elasticsearch. */
    @Autowired
    private ElasticStorageService storageService;

    /** Core pool size of the async storage executor. */
    private static final int CORE_POOL_SIZE = 5;

    /**
     * Maximum pool size.
     *
     * <p>NOTE(review): with an unbounded {@link LinkedBlockingQueue} the executor
     * never grows past {@link #CORE_POOL_SIZE}, so this limit is currently
     * ineffective; bound the queue if bursting up to this size is intended.
     */
    private static final int MAX_NUM_POOL_SIZE = 10;

    /** Idle keep-alive time in milliseconds after which surplus threads are reclaimed. */
    private static final long THREAD_POOL_KEEP_ALIVE_TIME = 10_000L;

    /** Thread pool used to store Elasticsearch data asynchronously. */
    private static final ThreadPoolExecutor ELASTIC_THREAD_POOL = new ThreadPoolExecutor(
            CORE_POOL_SIZE,
            MAX_NUM_POOL_SIZE,
            THREAD_POOL_KEEP_ALIVE_TIME,
            TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<>());

    static {
        // Allow even core threads to time out so the pool shrinks to zero when idle.
        ELASTIC_THREAD_POOL.allowCoreThreadTimeOut(true);
    }

    /**
     * Submits one batch of consumed records to the thread pool for asynchronous
     * Elasticsearch storage. Shared by all partition listeners below.
     *
     * @param recordList batch of records received from a single partition
     */
    private void submitForStorage(List<ConsumerRecord<String, String>> recordList) {
        ELASTIC_THREAD_POOL.submit(new ElasticStorageAsync(storageService, recordList, prefixIndexName));
    }

    /**
     * Listens to Kafka messages on partition 0 of the operate topic.
     *
     * @param recordList batch of consumed records
     */
    @KafkaListener(topicPartitions={@TopicPartition(partitions={"0"},topic="${kafka.topic.operate}")})
    public void listenerP0(List<ConsumerRecord<String, String>> recordList) {
        submitForStorage(recordList);
    }

    /**
     * Listens to Kafka messages on partition 1 of the operate topic.
     *
     * @param recordList batch of consumed records
     */
    @KafkaListener(topicPartitions={@TopicPartition(partitions={"1"},topic="${kafka.topic.operate}")})
    public void listenerP1(List<ConsumerRecord<String, String>> recordList) {
        submitForStorage(recordList);
    }

    /**
     * Listens to Kafka messages on partition 2 of the operate topic.
     *
     * @param recordList batch of consumed records
     */
    @KafkaListener(topicPartitions={@TopicPartition(partitions={"2"},topic="${kafka.topic.operate}")})
    public void listenerP2(List<ConsumerRecord<String, String>> recordList) {
        submitForStorage(recordList);
    }

    /**
     * Listens to Kafka messages on partition 3 of the operate topic.
     *
     * @param recordList batch of consumed records
     */
    @KafkaListener(topicPartitions={@TopicPartition(partitions={"3"},topic="${kafka.topic.operate}")})
    public void listenerP3(List<ConsumerRecord<String, String>> recordList) {
        submitForStorage(recordList);
    }
}
