package com.es.demo.main.kafka;

import com.es.demo.main.IResultHandler;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

import java.util.List;
import java.util.Properties;


public class KafkaServer {
    private KafkaConsumer<String, String> consumer;

    // volatile: stopConsumer() is expected to be called from a different thread
    // than the one spinning in startConsumer(); without volatile the poll loop
    // may never observe the flag change. (Also fixes the "isRuning" typo — the
    // field is private, so the rename is invisible to callers.)
    private volatile boolean isRunning = true;

    // Created once per startConsumer() call instead of once per poll iteration,
    // as the original did — the handler is stateless from this class's view.
    private IResultHandler iResultHandler;

    /**
     * Creates the Kafka consumer and subscribes it to the given topics.
     * Auto-commit is intentionally left disabled (see the commit in
     * {@link #startConsumer()}); offsets are committed manually after each
     * non-empty batch is handled.
     *
     * @param serverHost Kafka bootstrap servers, e.g. {@code "host1:9092,host2:9092"}
     * @param groupId    consumer group id
     * @param topics     topics to subscribe to
     */
    public void init(String serverHost, String groupId, List<String> topics) {
        Properties props = new Properties();
        props.put("bootstrap.servers", serverHost);
        props.put("group.id", groupId);
        // Manual offset management: commitSync() is called in startConsumer()
        // only after the batch has been handed to the result handler.
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        consumer = new KafkaConsumer<>(props);
        consumer.subscribe(topics);
    }

    /**
     * Runs the poll loop until {@link #stopConsumer()} is called or the thread
     * is interrupted. Each polled batch is passed to the result handler; after
     * a non-empty batch, the loop waits 30 seconds and then commits offsets
     * synchronously.
     *
     * <p>NOTE(review): the consumer is never closed when the loop exits —
     * preserved from the original so a stop/restart caller keeps working, but
     * consider closing it if restart is not actually needed.
     */
    public void startConsumer() {
        isRunning = true;
        iResultHandler = new ResultHandler_kafka();
        while (isRunning) {
            // poll(long) is deprecated in Kafka clients >= 2.0; kept as-is
            // because the client version in use is not visible from this file.
            ConsumerRecords<String, String> records = consumer.poll(1000);
            iResultHandler.resultHandle(records);

            if (records.count() > 0) {
                // Delay before committing offsets, then commit synchronously.
                // NOTE(review): the purpose of this 30 s wait is not evident
                // from this file — presumably it gives downstream processing
                // time to finish before the offset is committed; confirm.
                try {
                    Thread.sleep(30 * 1000);
                } catch (InterruptedException e) {
                    // Restore the interrupt status and exit the loop instead
                    // of swallowing the interruption (the original continued
                    // looping, making the thread effectively uninterruptible).
                    Thread.currentThread().interrupt();
                    isRunning = false;
                    break;
                }
                consumer.commitSync();
            }
        }
    }

    /** Signals the poll loop in {@link #startConsumer()} to exit. */
    public void stopConsumer() {
        this.isRunning = false;
    }
}
