package com.zmn.oms.task.kafka.listener;

import com.zmn.oms.task.kafka.process.RecordProcessService;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Executor;

/**
 * DTS Kafka listener: runs a long-lived polling loop that pulls binary records
 * from Kafka and hands each one to {@link RecordProcessService}, then commits
 * offsets asynchronously (at-least-once delivery: a crash between process and
 * commit replays the batch).
 *
 * @author lujia
 * @date 2018/12/05 14:42
 */
@Slf4j
public class DtsKafkaListener{

    /** Downstream processor invoked once per consumed record. */
    private RecordProcessService recordProcessService;
    /** Consumer polled by the worker loop; offsets committed async after each batch. */
    private KafkaConsumer<String, byte[]> kafkaConsumer;
    /** Runs the polling loop when provided; otherwise a dedicated thread is created. */
    private Executor executor;


    public DtsKafkaListener(RecordProcessService recordProcessService, KafkaConsumer<String, byte[]> kafkaConsumer, Executor executor) {
        this.recordProcessService = recordProcessService;
        this.kafkaConsumer = kafkaConsumer;
        this.executor = executor;
    }

    /** No-arg constructor kept for framework instantiation; collaborators must be set before {@link #start()}. */
    public DtsKafkaListener() {
    }

    /**
     * Starts the polling loop on the injected executor, or — when none was
     * supplied — on a dedicated, named thread. The loop runs until the worker
     * thread is interrupted; interruption now terminates the loop instead of
     * being swallowed and retried (the original logged it and kept spinning,
     * making the listener unstoppable).
     */
    public void start() {
        log.info("#oms#dts#kafka 开始拉取数据====>threadId:{}",Thread.currentThread().getId());
        Runnable pollLoop = () -> {
            log.info("#oms#dts#kafka 启动执行线程:{}",Thread.currentThread().getId());
            while (!Thread.currentThread().isInterrupted()) {
                // 出现异常最多重试3次 — retry a failed poll up to 3 times before the outer loop comes around again
                for (int i = 0; i < 3; i++) {
                    try {
                        // 获取数据
                        poll();
                        // 线程sleep1毫秒降低cpu使用 — avoid spinning when poll returns immediately
                        Thread.sleep(1);
                        break;
                    } catch (InterruptedException ie) {
                        // Restore the flag so isInterrupted() is visible to any caller, then exit cleanly.
                        Thread.currentThread().interrupt();
                        log.warn("#oms#dts#kafka polling thread interrupted, stopping", ie);
                        return;
                    } catch (Exception e) {
                        log.error("poll 异常" + e.getMessage(), e);
                        try {
                            // Back off before the next retry attempt.
                            Thread.sleep(1000);
                        } catch (InterruptedException ex) {
                            Thread.currentThread().interrupt();
                            log.warn("#oms#dts#kafka polling thread interrupted during backoff, stopping", ex);
                            return;
                        }
                    }
                }
            }
        };
        if (executor != null) {
            // Honor the injected executor — previously accepted by the constructor but never used.
            executor.execute(pollLoop);
        } else {
            // Named thread so it is identifiable in thread dumps.
            new Thread(pollLoop, "dts-kafka-poller").start();
        }
    }

    /**
     * Polls one batch, processes every record in order, then commits offsets
     * asynchronously. Commit-after-process gives at-least-once semantics.
     */
    private void poll() {
        // NOTE(review): poll(0) is deprecated in kafka-clients >= 2.0 in favor of
        // poll(Duration) — verify the client version before migrating.
        ConsumerRecords<String, byte[]> records = kafkaConsumer.poll(0);

        for (ConsumerRecord<String, byte[]> record : records) {
            recordProcessService.process(record);
        }

        kafkaConsumer.commitAsync();
    }
}
