package com.bonc.wkafka.utils;

import java.time.Duration;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.Deserializer;
import org.apache.kafka.common.serialization.Serializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.bonc.wkafka.utils.consumer.KafkaConsumerProps;
import com.bonc.wkafka.utils.consumer.WKafkaConsumer;
import com.bonc.wkafka.utils.consumer.translator.KafkaRecordsTranslator;
import com.bonc.wkafka.utils.producer.KafkaProducerProps;
import com.bonc.wkafka.utils.producer.WKafkaProducer;
import com.bonc.wkafka.utils.producer.handler.KafkaExceptionHandler;
import com.bonc.wkafka.utils.producer.handler.KafkaSucMetadataHandler;

/**
 * Kafka utility client wrapping a single {@link KafkaProducer} and {@link KafkaConsumer} pair.
 *
 * @author WYB
 * @deprecated use {@link WKafkaProducer} and {@link WKafkaConsumer} instead.
 */
@Deprecated
public class KafkaClient<K, V> {
	
	private static final Logger logger = LoggerFactory.getLogger(KafkaClient.class);
	
	private KafkaProducerProps producerProps;
	
	private KafkaConsumerProps consumerProps;
	
	private KafkaProducer<K, V> producer;
	
	private KafkaConsumer<K, V> consumer;
	
	/**
	 * Creates the underlying producer from the given properties and serializers.
	 *
	 * @param props       producer configuration
	 * @param kSerializer key serializer
	 * @param vSerializer value serializer
	 * @return this client, for call chaining
	 */
	public KafkaClient<K, V> buildProducer(KafkaProducerProps props, Serializer<K> kSerializer, Serializer<V> vSerializer) {
		this.producerProps = props;
		producer = new KafkaProducer<K, V>(props.getProps(), kSerializer, vSerializer);
		return this;
	}
	
	/**
	 * Creates the underlying consumer from the given properties and deserializers.
	 *
	 * @param props         consumer configuration
	 * @param kDeserializer key deserializer
	 * @param vDeserializer value deserializer
	 * @return this client, for call chaining
	 */
	public KafkaClient<K, V> buildConsumer(KafkaConsumerProps props, Deserializer<K> kDeserializer, Deserializer<V> vDeserializer) {
		this.consumerProps = props;
		consumer = new KafkaConsumer<K, V>(props.getProps(), kDeserializer, vDeserializer);
		return this;
	}
	
	/**
	 * Sends a record fire-and-forget; delivery failures are not reported to the caller.
	 *
	 * @param topic topic to send to
	 * @param key   record key
	 * @param value record value
	 */
	public void doProducer(String topic, K key, V value) {
		producer.send(new ProducerRecord<K, V>(topic, key, value));
	}
	
	/**
	 * Sends a record synchronously, blocking until the broker acknowledges it.
	 *
	 * @param topic            topic to send to
	 * @param key              record key
	 * @param value            record value
	 * @param sucMetaHandler   invoked with the record metadata on success; may be {@code null}
	 * @param exceptionHandler invoked with the failed record on error; may be {@code null}
	 */
	public void doProducerSync(String topic, K key, V value, KafkaSucMetadataHandler sucMetaHandler, KafkaExceptionHandler exceptionHandler) {
		ProducerRecord<K, V> record = new ProducerRecord<>(topic, key, value);
		try {
			// get() blocks until the broker responds and throws if the broker returned an error.
			RecordMetadata metadata = producer.send(record).get();
			// SLF4J uses {} placeholders; printf-style %s/%d are never substituted.
			logger.info("topic: {}, partition: {}, offset: {}, serializedKeySize: {}, serializedValueSize: {}",
					metadata.topic(), metadata.partition(), metadata.offset(), metadata.serializedKeySize(), metadata.serializedValueSize());
			if (sucMetaHandler != null) {
				sucMetaHandler.handle(metadata);
			}
		} catch (InterruptedException | ExecutionException e) {
			if (e instanceof InterruptedException) {
				// Restore the interrupt flag so callers can still observe the interruption.
				Thread.currentThread().interrupt();
			}
			logger.error("synchronous send failed, topic: {}", topic, e);
			if (exceptionHandler != null) {
				exceptionHandler.handle(e, record);
			}
		}
	}
	
	/**
	 * Sends a record asynchronously with a completion callback (records are batched by the producer).
	 * Intended for test environments: the success handler also runs on the Sender thread, which can
	 * block it and reduce throughput.
	 *
	 * @param topic            topic to send to
	 * @param key              record key
	 * @param value            record value
	 * @param sucMetaHandler   invoked with the record metadata on success; may be {@code null}
	 * @param exceptionHandler invoked with the failed record on error; may be {@code null}
	 */
	public void doProducerNsync(String topic, K key, V value, KafkaSucMetadataHandler sucMetaHandler, KafkaExceptionHandler exceptionHandler) {
		ProducerRecord<K, V> record = new ProducerRecord<>(topic, key, value);
		// The callback runs on the producer's Sender thread (once per record);
		// it must return quickly or it slows down every send.
		producer.send(record, (metadata, exception) -> {
			if (exception != null) {
				if (exceptionHandler != null) {
					exceptionHandler.handle(exception, record);
				}
			} else if (sucMetaHandler != null) {
				sucMetaHandler.handle(metadata);
			}
		});
	}
	
	/**
	 * Sends a record asynchronously, reporting only failures. Intended for production use:
	 * successful sends do no extra work, keeping the producer's I/O thread fast.
	 *
	 * @param topic            topic to send to
	 * @param key              record key
	 * @param value            record value
	 * @param exceptionHandler invoked with the failed record on error; may be {@code null}
	 */
	public void doProducerNsync(String topic, K key, V value, KafkaExceptionHandler exceptionHandler) {
		ProducerRecord<K, V> record = new ProducerRecord<>(topic, key, value);
		// The callback runs on the producer's I/O thread; it must return quickly.
		producer.send(record, (metadata, exception) -> {
			if (exception != null && exceptionHandler != null) {
				exceptionHandler.handle(exception, record);
			}
		});
	}
	
	/**
	 * Consumes with at-most-once semantics: offsets are committed before the records are
	 * processed, so records polled just before a crash may never be processed.
	 * This method loops forever and does not return.
	 *
	 * @param topics         topics to subscribe to
	 * @param blockTimeMs    maximum time in milliseconds each poll blocks waiting for records
	 * @param recordsHandler invoked with every polled batch
	 */
	public void doConsumerAMS(List<String> topics, long blockTimeMs, KafkaRecordsTranslator<K, V> recordsHandler) {
		// A rebalance listener is needed to commit offsets when partitions are revoked.
		consumer.subscribe(topics, new HandleRebalance());
		while (true) {
			ConsumerRecords<K, V> records = consumer.poll(Duration.ofMillis(blockTimeMs));
			// Commit BEFORE processing: at-most-once.
			consumer.commitAsync();
			recordsHandler.handle(records);
		}
	}
	
	/**
	 * Consumes with at-least-once semantics: offsets are committed only after the records are
	 * processed. If processing is idempotent and the handler manages its own retries, this is
	 * effectively exactly-once. This method loops forever and does not return.
	 *
	 * @param topics         topics to subscribe to
	 * @param blockTimeMs    maximum time in milliseconds each poll blocks waiting for records
	 * @param recordsHandler invoked with every polled batch
	 */
	public void doConsumerALS(List<String> topics, long blockTimeMs, KafkaRecordsTranslator<K, V> recordsHandler) {
		// A rebalance listener is needed to commit offsets when partitions are revoked.
		consumer.subscribe(topics, new HandleRebalance());
		while (true) {
			ConsumerRecords<K, V> records = consumer.poll(Duration.ofMillis(blockTimeMs));
			recordsHandler.handle(records);
			// Commit AFTER processing: at-least-once.
			consumer.commitAsync();
		}
	}
	
	/**
	 * Rebalance listener that synchronously commits the current offsets before partitions
	 * are revoked, so the consumer that takes them over does not re-read committed records.
	 *
	 * @author WYB
	 */
	private class HandleRebalance implements ConsumerRebalanceListener {

		/**
		 * Called after the consumer has stopped fetching and before the rebalance starts.
		 */
		@Override
		public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
			consumer.commitSync();
		}

		/**
		 * Called after partitions are reassigned and before the consumer resumes fetching.
		 */
		@Override
		public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
			// Nothing to do on assignment.
		}
		
	}

	/** Closes the consumer, if one was built. */
	public void closeConsumer() {
		if (consumer != null) {
			consumer.close();
		}
	}

	/** Closes the producer, if one was built. */
	public void closeProducer() {
		if (producer != null) {
			producer.close();
		}
	}
}
