/*
 ***************************************************************************************
 * All rights Reserved, Designed By RTF
 * @Title:  KafkaConsumerAdaptor.java   
 * @Package com.rtf.framework.mq.kafka.consumer
 * @Description: 消费者创建初始化  
 * @author: 司福林
 * @date:   2020-10-17 18:57:51   
 * @version V1.0 
 * @Copyright: 2020 RTF. All rights reserved.
 * 注意：本内容仅限于公司内部使用，禁止外泄以及用于其他的商业目
 *  ---------------------------------------------------------------------------------- 
 * 文件修改记录
 *     文件版本：         修改人：             修改原因：
 ***************************************************************************************
 */
package com.rtf.framework.mq.kafka.consumer;

import java.time.Duration;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.commons.collections.IteratorUtils;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.util.Assert;

import com.rtf.framework.common.util.UtilBeanFactory;
import com.rtf.framework.mq.common.BatchMessageConsumer;
import com.rtf.framework.mq.common.CommonConstants;
import com.rtf.framework.mq.common.ConsumerProperties;
import com.rtf.framework.mq.common.MessageConsumer;
import com.rtf.framework.mq.common.Service;
import com.google.common.collect.Maps;

/**
 *  
 * @Description: Creates and initializes the Kafka consumer, then starts consuming.
 * @ClassName:  KafkaConsumerAdaptor
 * @author: 67077
 * @date:   2020-10-17 18:57:51
 * @since:  v1.0
 */
public class KafkaConsumerAdaptor implements Service {

	/** The properties. */
	private KafkaConsumerProperties properties;

	/** The consumer properties. */
	private ConsumerProperties consumerProperties;

	private Consumer<String, byte[]> consumer;

	private AtomicBoolean closed = new AtomicBoolean(false);

	/**
	 * Instantiates a new rocketmq consumer adaptor.
	 *
	 * @param properties the properties
	 * @param consumerProperties the consumer properties
	 */
	public KafkaConsumerAdaptor(KafkaConsumerProperties properties, ConsumerProperties consumerProperties) {
		Assert.notNull(properties, "KafkaConsumerProperties is null");
		Assert.notNull(consumerProperties, "Kafka consumer is null");
		this.properties = properties;
		this.consumerProperties = consumerProperties;
	}

	@Override
	public void init() {
		Map<String, Object> configs = Maps.newHashMap();
		configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, properties.getServers());
		configs.put(ConsumerConfig.GROUP_ID_CONFIG, consumerProperties.getGroupName());
		configs.put(ConsumerConfig.CLIENT_ID_CONFIG, consumerProperties.getInstanceName());
		configs.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,
				CommonConstants.CONSUMER_OFFSET_LAST == consumerProperties.getConsumerWhere() ? "latest"
						: CommonConstants.CONSUMER_OFFSET_FIRST == consumerProperties.getConsumerWhere() ? "earliest"
								: "none");
		configs.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, properties.getMaxPollRecords());
		configs.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, properties.getSessionTimeout());
		configs.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, properties.getHeartbeatInterval());
		configs.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, properties.getAutocommitInterval());
		configs.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, properties.getFetchMaxBytes());
		consumer = new KafkaConsumer<String, byte[]>(configs, new StringDeserializer(), new ByteArrayDeserializer());
		consumer.subscribe(consumerProperties.getTopics());
	}

	@Override
	public void start() {

		Object handler = UtilBeanFactory.getApplicationContext().getBean(consumerProperties.getHandler());
		boolean isBatch = handler instanceof MessageConsumer ? false : true;
		MessageConsumer signHandler = isBatch ? null : (MessageConsumer) handler;
		BatchMessageConsumer batchHandler = isBatch ? (BatchMessageConsumer) handler : null;
		new Thread(() -> {
			Duration duration = Duration.ofMillis(properties.getMaxPollInterval());
			ConsumerRecords<String, byte[]> records;
			while (!closed.get()) {
				records = consumer.poll(duration);

				if (records.isEmpty()) {
					continue;
				}
				if (isBatch) {
					batchHandler.onMessageKafka(IteratorUtils.toList(records.iterator()));
				} else {

					for (ConsumerRecord<String, byte[]> record : records) {
						signHandler.onMessage(record);
					}
				}

			}
		}).start();
	}

	@Override
	public void destroy() {
		closed.set(true);
		consumer.close();
	}

}
