package com.cmsz.collection.service;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.OffsetCommitCallback;
import org.apache.kafka.common.TopicPartition;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.client.transport.TransportClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * Kafka batch-consuming listener: receives records in batches, routes each record
 * through {@link InfoFilter} into an Elasticsearch {@link BulkRequest}, performs one
 * bulk insert per batch, then commits the consumed offsets back to Kafka.
 *
 * @author liupeng
 * @date 2019/12/12
 */
@Component
public class KafkaListenerReceiverService {

    private static final Logger logger = LoggerFactory.getLogger(KafkaListenerReceiverService.class);

    /** Transforms a consumed record into index operations appended to the bulk request. */
    @Autowired
    InfoFilter infoFilter;

    /** Elasticsearch transport client used for the per-batch bulk insert. */
    @Autowired
    private TransportClient client;

    /**
     * Highest consumed offset + 1 per topic-partition, accumulated across batches and
     * handed to {@code commitAsync}. NOTE(review): shared instance state — safe only if
     * the listener container runs this bean single-threaded; confirm container config.
     */
    private final Map<TopicPartition, OffsetAndMetadata> currentOffsets = new HashMap<>();

    /**
     * Batch listener entry point.
     *
     * <p>For each record: remembers {@code offset + 1} (the next offset to read) for the
     * record's partition, then lets {@link InfoFilter#infoToLog} append it to the bulk
     * request. After the loop the whole batch is bulk-inserted into Elasticsearch and the
     * offsets are committed asynchronously. On any failure, a best-effort retry performs
     * the bulk insert again followed by a synchronous commit.
     *
     * <p>NOTE(review): raw {@code ConsumerRecord}/{@code Consumer} types are kept because
     * the generic parameters expected by {@code InfoFilter.infoToLog} are not visible here.
     *
     * @param records       the batch of records delivered by the listener container
     * @param kafkaConsumer the consumer instance, used for manual offset commits
     */
    @KafkaListener(topics = {"#{'${spring.kafka.listener.topics}'.split(',')}"},
            containerFactory = "kafkaListenerContainerFactory2",
            groupId = "${spring.kafka.listener.receiver.group-id}")
    public void listenLog(List<ConsumerRecord> records, Consumer kafkaConsumer) {
        logger.info("开始消费kafka");
        // Accumulates all index operations for this batch; a single bulk call at the end.
        BulkRequest bulkRequest = new BulkRequest();
        try {
            for (ConsumerRecord record : records) {
                // Store offset + 1 so the commit points at the NEXT record to consume.
                currentOffsets.put(new TopicPartition(record.topic(), record.partition()),
                        new OffsetAndMetadata(record.offset() + 1));

                if (!record.topic().isEmpty()) {
                    bulkRequest = infoFilter.infoToLog(record, bulkRequest);
                }
            }
            // Bulk insert the whole batch into Elasticsearch.
            client.bulk(bulkRequest);
            // Commit offsets asynchronously; the callback only reports failures.
            // (FIX: was printStackTrace() plus an empty-message logger.error call.)
            kafkaConsumer.commitAsync(currentOffsets, (offsets, exception) -> {
                if (exception != null) {
                    logger.error("Async offset commit failed for offsets {}", offsets, exception);
                }
            });
        } catch (Exception e) {
            // FIX: log the full stack trace, not just e.getMessage() (which may be null).
            logger.error("Kafka batch processing failed", e);
            try {
                // Best-effort fallback: retry the bulk insert, then commit synchronously.
                client.bulk(bulkRequest);
                kafkaConsumer.commitSync();
            } catch (Exception e1) {
                // FIX: original logged the OUTER exception's message here instead of e1,
                // so retry failures were misreported and their stack traces lost.
                logger.error("Retry bulk insert / sync commit failed", e1);
            }
        }
    }
}
