package com.yasaka.stock.kafkaConsumer;

//import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import java.util.*;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.Semaphore;
import java.util.regex.Pattern;

/**
 * A consumer thread that polls records from Kafka, fans each record out to a worker
 * thread pool, waits for the whole batch to finish, and then commits offsets
 * synchronously. Back-pressure is applied via a {@link Semaphore}: one permit is
 * acquired per in-flight record and released by the worker when processing ends.
 *
 * <p>Thread-safety: each instance owns its own {@code KafkaConsumer}; the consumer is
 * confined to this thread, as required by the Kafka client.
 */
public class TopicPartitionThread extends Thread {

    /** Pool that executes per-record {@code WorkThread} tasks. */
    private final ExecutorService workerExecutorService;

    /** Limits the number of records in flight; each permit is released by its WorkThread. */
    private final Semaphore semaphore;

    /** Next offset to consume per partition for the current batch (last processed + 1). */
    private final Map<TopicPartition, Long> offsetsMap = new HashMap<>();

    /** Futures of the worker tasks submitted for the current poll batch. */
    private final List<Future<String>> taskList = new ArrayList<>();

    /**
     * @param workerExecutorService pool used to process records concurrently
     * @param semaphore             shared permit pool bounding in-flight records
     */
    public TopicPartitionThread(ExecutorService workerExecutorService, Semaphore semaphore) {
        this.workerExecutorService = workerExecutorService;
        this.semaphore = semaphore;
    }

    /**
     * Builds configuration for the new (broker-based) {@code KafkaConsumer}.
     * The previous version mixed in old ZooKeeper-consumer keys
     * ({@code zookeeper.connect}, {@code auto.offset.reset=smallest}) which the new
     * client does not use, and omitted the mandatory deserializers.
     *
     * @param brokers Kafka broker list, e.g. {@code "host1:9092,host2:9092"}
     * @param groupId consumer group this thread joins
     * @return properties suitable for {@code new KafkaConsumer<>(props)}
     */
    private static Properties buildKafkaProperty(String brokers, String groupId) {
        Properties properties = new Properties();
        // The new consumer talks to brokers directly, not to ZooKeeper.
        properties.put("bootstrap.servers", brokers);
        properties.put("group.id", groupId);
        // Offsets are committed manually only after the whole batch has been processed.
        properties.put("enable.auto.commit", "false");
        properties.put("session.timeout.ms", "30000");
        // Cap the batch size so the semaphore / worker pool is not flooded by one poll.
        properties.put("max.poll.records", "100");
        // "earliest" is the new-consumer spelling of the old "smallest".
        properties.put("auto.offset.reset", "earliest");
        // Mandatory for the new consumer: how to turn bytes into keys/values.
        properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        return properties;
    }

    /**
     * Poll loop: fetch a batch, dispatch every record to the worker pool, wait for all
     * workers, then commit the batch's offsets synchronously. Runs until the shared
     * {@code Cache} flag turns false or the thread is interrupted.
     */
    @Override
    public void run() {
        Properties properties = buildKafkaProperty("120.27.155.126:3731", "apigw-group3");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
        try {
            // subscribe() takes a collection of topic names, not a comma-separated string.
            consumer.subscribe(Arrays.asList("shiyangtopic-test", "sytopic-test"));

            while (Cache.getInstance().isKafkaThreadStatus()) {
                try {
                    // 100 ms poll timeout; poll() returns ConsumerRecords, iterable
                    // over individual ConsumerRecord entries (not a Map).
                    ConsumerRecords<String, String> records = consumer.poll(100);
                    for (ConsumerRecord<String, String> record : records) {
                        // Throttle: one permit per in-flight record; the WorkThread
                        // releases it when processing finishes.
                        semaphore.acquire();
                        // Track the NEXT offset to consume for this partition
                        // (commit semantics expect last processed + 1).
                        offsetsMap.put(new TopicPartition(record.topic(), record.partition()),
                                record.offset() + 1);
                        // Hand the record to the worker pool.
                        taskList.add(workerExecutorService.submit(
                                new WorkThread(record.topic(), record.value(), semaphore)));
                    }

                    // Block until every record of this batch has been processed...
                    for (Future<String> task : taskList) {
                        task.get();
                    }

                    // ...then synchronously commit the batch's offsets to the cluster.
                    consumer.commitSync();
                } catch (InterruptedException e) {
                    // Restore the interrupt flag and stop polling cleanly.
                    Thread.currentThread().interrupt();
                    break;
                } catch (Exception e) {
                    System.out.println("TopicPartitionThread run error." + e);
                } finally {
                    // Reset the batch bookkeeping regardless of success or failure.
                    taskList.clear();
                }
            }
        } finally {
            // Always release the sockets/buffers held by the consumer, even if the
            // loop exits via an unexpected throwable.
            consumer.close();
        }
    }
}