package com.distributer.settlement.comsumer;

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.WakeupException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.distributer.settlement.configuration.KafkaConsumerConfig;

/**
 * Polls a Kafka topic on a dedicated thread and hands each record to a worker
 * thread pool, committing offsets manually once per poll batch.
 *
 * <p>Thread-safety: {@link KafkaConsumer} is NOT thread-safe, so every consumer
 * call (poll/commit/close) happens on the thread executing {@link #run()}.
 * {@link #shutdown()} may be invoked from any thread (e.g. the JVM shutdown
 * hook registered in the constructor): it only flips a flag and calls
 * {@link KafkaConsumer#wakeup()}, the one method documented as safe to call
 * from another thread.
 */
public class ConsumerHandler implements Runnable {

	private static final Logger LOGGER = LoggerFactory.getLogger(ConsumerHandler.class);

	private final KafkaConsumer<String, String> consumer;

	private final ThreadPoolExecutor executor;

	// Set once by shutdown(); the poll loop observes it and closes the
	// consumer on its own thread (closing from the hook thread while run()
	// is polling would throw ConcurrentModificationException).
	private final AtomicBoolean closed = new AtomicBoolean(false);

	/**
	 * @param brokerList      comma-separated bootstrap servers
	 * @param groupId         consumer group id
	 * @param topic           topic to subscribe to
	 * @param kConsumerConfig tuning values (auto-commit, commit interval, max poll records)
	 */
	public ConsumerHandler(String brokerList, String groupId, String topic, KafkaConsumerConfig kConsumerConfig) {
		Properties props = new Properties();
		props.put("bootstrap.servers", brokerList);
		props.put("group.id", groupId);
		props.put("enable.auto.commit", kConsumerConfig.getAutoCommit());
		props.put("auto.commit.interval.ms", kConsumerConfig.getAutoCommitInterval());
		props.put("max.poll.records", kConsumerConfig.getMaxPollRecords());
		// Rate at which the consumer thread sends heartbeats to the group coordinator.
		props.put("heartbeat.interval.ms", "3000");
		props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
		props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
		consumer = new KafkaConsumer<>(props);
		consumer.subscribe(Arrays.asList(topic));
		// CallerRunsPolicy: when the 1024-slot queue is full, execute the task on
		// the polling thread instead of throwing RejectedExecutionException (the
		// default AbortPolicy would kill the poll loop). This also applies
		// natural back-pressure on consumption.
		executor = new ThreadPoolExecutor(4, 8, 1000L, TimeUnit.MILLISECONDS,
				new ArrayBlockingQueue<Runnable>(1024),
				new ThreadFactory() {
					private final AtomicInteger threadNumber = new AtomicInteger(0);
					private static final String CONSUMER_THREAD_PREFIX = "kafka-consumer-thread-";

					@Override
					public Thread newThread(Runnable r) {
						Thread thread = new Thread(r, CONSUMER_THREAD_PREFIX + threadNumber.getAndIncrement());
						thread.setDaemon(true);
						return thread;
					}
				},
				new ThreadPoolExecutor.CallerRunsPolicy());
		Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
			public void run() {
				shutdown();
			}
		}));
	}

	/**
	 * Poll loop: submits every record to the worker pool, then commits the
	 * batch's offsets once per poll (instead of one blocking commitSync per
	 * record, which serialized every record behind a broker round-trip).
	 *
	 * <p>NOTE(review): offsets are committed after submission, not after the
	 * Worker finishes, so delivery is at-most-once if the process dies while
	 * workers are still running — confirm this matches the intended semantics.
	 */
	@Override
	public void run() {
		try {
			// isInterrupted() (not Thread.interrupted()) — the latter CLEARS the
			// interrupt flag as a side effect of testing it.
			while (!closed.get() && !Thread.currentThread().isInterrupted()) {
				ConsumerRecords<String, String> records = consumer.poll(200);
				Map<TopicPartition, OffsetAndMetadata> currentOffsets =
						new HashMap<TopicPartition, OffsetAndMetadata>();
				for (final ConsumerRecord<String, String> record : records) {
					executor.execute(new Worker(record));
					// +1: the committed offset is the NEXT offset to be consumed.
					currentOffsets.put(new TopicPartition(record.topic(), record.partition()),
							new OffsetAndMetadata(record.offset() + 1, ""));
				}
				if (!currentOffsets.isEmpty()) {
					consumer.commitSync(currentOffsets);
				}
			}
		} catch (WakeupException e) {
			// Expected path: shutdown() called consumer.wakeup() to break poll().
			if (!closed.get()) {
				throw e; // a wakeup we did not request — propagate
			}
		} finally {
			// close() must run on this (polling) thread: KafkaConsumer is not thread-safe.
			consumer.close();
			LOGGER.info("kafka消费者被关闭！");
		}
	}

	/**
	 * Signals the poll loop to stop and drains the worker pool. Safe to call
	 * from any thread and idempotent (subsequent calls are no-ops).
	 */
	public void shutdown() {
		if (!closed.compareAndSet(false, true)) {
			return; // already shutting down
		}
		// wakeup() is the only KafkaConsumer method callable from another
		// thread; it makes a blocked poll() throw WakeupException so run()
		// can exit and close the consumer itself.
		consumer.wakeup();
		executor.shutdown();
		try {
			if (!executor.awaitTermination(60, TimeUnit.SECONDS)) {
				executor.shutdownNow();
			}
		} catch (InterruptedException e) {
			LOGGER.info("强制关闭线程池！");
			executor.shutdownNow();
			Thread.currentThread().interrupt(); // restore interrupt status
		}
	}

}
