package hyl.base.mq.kafka;

import java.time.Duration;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.WakeupException;

import hyl.core.run.IRun;

/**
 * Thin wrapper around a Kafka byte[]/byte[] producer and consumer pair.
 * <p>
 * Obtain instances via {@link #getInsProducer()} or {@link #getInsConsumer(String)}.
 * Configuration comes from {@code KfkConfig}; received records are delivered to an
 * {@code IRun} callback.
 * <p>
 * NOTE(review): {@code KafkaConsumer} is not thread-safe — all consumer calls except
 * {@code wakeup()} must happen on the thread that runs the receive loop.
 */
public class MyKafka {
	private KafkaConsumer<byte[], byte[]> _consumer;
	private KafkaProducer<byte[], byte[]> _producer;
	// Poll timeout; bounds each poll() so an unstable connection cannot block forever.
	private final Duration DUR = Duration.ofSeconds(9);
	// Remaining receive iterations: 0 = stop, -1 = loop forever, n > 0 = run n more times.
	private final AtomicInteger _count = new AtomicInteger(-1);

	/**
	 * Creates an instance with the producer already initialized.
	 *
	 * @return a producer-ready {@code MyKafka}
	 */
	public static MyKafka getInsProducer() {
		MyKafka kfk = new MyKafka();
		kfk.iniProducer();
		return kfk;
	}

	/**
	 * Creates an instance with the consumer already initialized.
	 *
	 * @param 客户端id consumer client id passed to {@code KfkConfig.getCfgSub}
	 * @return a consumer-ready {@code MyKafka}
	 */
	public static MyKafka getInsConsumer(String 客户端id) {
		MyKafka kfk = new MyKafka();
		kfk.iniConsumer(客户端id);
		return kfk;
	}

	// Instances are created through the static factories only.
	private MyKafka() {
	}

	/**
	 * Lazily initializes the producer (idempotent).
	 *
	 * @return the shared {@code KafkaProducer}
	 */
	public KafkaProducer<byte[], byte[]> iniProducer() {
		if (_producer == null) {
			_producer = new KafkaProducer<byte[], byte[]>(KfkConfig.getCfgPub());
		}
		return _producer;
	}

	/**
	 * Lazily initializes the consumer (idempotent) and optionally sets the
	 * receive-iteration budget.
	 *
	 * @param count    maximum number of receive iterations; {@code null} leaves the
	 *                 current value (default -1 = unlimited) unchanged
	 * @param clientid consumer client id
	 * @return the shared {@code KafkaConsumer}
	 */
	public KafkaConsumer<byte[], byte[]> iniConsumer(Integer count, String clientid) {
		if (_consumer == null) {
			_consumer = new KafkaConsumer<byte[], byte[]>(KfkConfig.getCfgSub(clientid));
		}
		if (count != null) {
			_count.set(count);
		}
		return _consumer;
	}

	/**
	 * Lazily initializes the consumer with an unlimited receive budget.
	 *
	 * @param clientid consumer client id
	 * @return the shared {@code KafkaConsumer}
	 */
	public KafkaConsumer<byte[], byte[]> iniConsumer(String clientid) {
		return iniConsumer(null, clientid);
	}

	/**
	 * Seeks every currently assigned partition to the given offset.
	 * <p>
	 * NOTE(review): with subscribe()-based consumption, assignment() is empty until
	 * the first poll() — confirm callers seek only after partitions are assigned.
	 *
	 * @param offset absolute offset applied to each assigned partition
	 */
	public void seek(long offset) {
		for (TopicPartition partition : _consumer.assignment()) {
			_consumer.seek(partition, offset);
		}
		// (removed a redundant assignment() call whose result was discarded)
	}

	/**
	 * Synchronous ordered send to partition 0. The consumer side is expected to
	 * read with assign() (partition strategy set via partition.assignment.strategy).
	 *
	 * @param topic target topic
	 * @param key   record key
	 * @param data  record value
	 * @return the broker-acknowledged metadata, or {@code null} on failure
	 */
	public RecordMetadata sendSyn(String topic, byte[] key, byte[] data) {
		return sendSyn(topic, 0, key, data);
	}

	/**
	 * Synchronous ordered send: blocks for the broker ack of each record before
	 * returning, trading throughput for ordering and delivery certainty.
	 *
	 * @param topic target topic
	 * @param 通道   target partition
	 * @param key   record key
	 * @param data  record value
	 * @return the broker-acknowledged metadata, or {@code null} on failure
	 */
	public RecordMetadata sendSyn(String topic, int 通道, byte[] key, byte[] data) {
		ProducerRecord<byte[], byte[]> record = new ProducerRecord<byte[], byte[]>(topic, 通道, key, data);
		Future<RecordMetadata> ack = iniProducer().send(record);
		try {
			return ack.get();
		} catch (InterruptedException e) {
			Thread.currentThread().interrupt(); // preserve the interrupt status
			e.printStackTrace();
			return null;
		} catch (ExecutionException e) {
			e.printStackTrace();
			return null;
		}
	}

	/**
	 * Asynchronous send of one record; returns immediately with the pending future.
	 *
	 * @param topic target topic
	 * @param key   record key
	 * @param data  record value
	 * @return future resolving to the record's metadata
	 */
	public Future<RecordMetadata> send(String topic, byte[] key, byte[] data) {
		ProducerRecord<byte[], byte[]> pr = new ProducerRecord<byte[], byte[]>(topic, key, data);
		return iniProducer().send(pr);
	}

	/**
	 * Asynchronous batched send with a completion callback. send() only appends the
	 * record to the producer buffer and returns; the producer batches records for
	 * efficiency. Calling get() on the returned future blocks until the request
	 * completes (or throws the send exception).
	 *
	 * @param topic topic to publish to
	 * @param key   record key
	 * @param data  record value
	 * @param 发送成功 completion callback; {@code null} installs a default that logs failures
	 * @return future resolving to the record's metadata
	 */
	public Future<RecordMetadata> send(String topic, byte[] key, byte[] data, Callback 发送成功) {
		ProducerRecord<byte[], byte[]> pr = new ProducerRecord<byte[], byte[]>(topic, key, data);
		Callback fun = (null == 发送成功) ? new defaultsendcallback() : 发送成功;
		return iniProducer().send(pr, fun);
	}

	/**
	 * Default send-completion callback: logs failed sends.
	 *
	 * @author 37798955@qq.com
	 */
	class defaultsendcallback implements Callback {
		// Invoked by the producer I/O thread once the send completes.
		@Override
		public void onCompletion(RecordMetadata metadata, Exception exception) {
			// Bug fix: a non-null exception alone signals failure. The old check also
			// required non-null metadata, silently dropping errors where metadata is null.
			if (null != exception) {
				if (null != metadata) {
					System.out.println("记录的offset在:" + metadata.offset());
				}
				System.out.println(exception.getMessage() + exception);
			}
		}
	}

	/**
	 * Blocks until every pending send future completes; pairs with {@code send(...)}.
	 *
	 * @param records futures returned by previous send() calls
	 * @return {@code true} if all sends succeeded, {@code false} on the first failure
	 */
	public boolean isSendFinished(List<Future<RecordMetadata>> records) {
		for (Future<RecordMetadata> record : records) {
			try {
				record.get();
			} catch (InterruptedException e) {
				Thread.currentThread().interrupt(); // preserve the interrupt status
				e.printStackTrace();
				return false;
			} catch (ExecutionException e) {
				e.printStackTrace();
				return false;
			}
		}
		return true;
	}

	/**
	 * Receive loop with automatic partition assignment (subscribe); message order
	 * across partitions is not guaranteed. Runs until the iteration budget hits 0
	 * or {@link #close()} wakes the consumer. Offsets are committed asynchronously
	 * after each non-empty batch when auto-commit is off.
	 *
	 * @param topics one or more topics separated by {@code ';'}
	 * @param 回调函数 invoked once per received record
	 */
	public void receive(String topics, IRun 回调函数) {
		_consumer.subscribe(Arrays.asList(topics.split(";")));
		try {
			while (_count.get() != 0) {
				// DUR bounds the poll so an unstable connection cannot block forever.
				ConsumerRecords<byte[], byte[]> records = _consumer.poll(DUR);
				for (ConsumerRecord<byte[], byte[]> record : records) {
					回调函数.run(record);
				}
				if (!records.isEmpty()) {
					commit();
				}
				if (_count.get() > 0) {
					_count.decrementAndGet();
				}
			}
		} catch (WakeupException e) {
			// close() was called from another thread; exit the poll loop cleanly.
		}
	}

	/**
	 * Synchronous receive from partition 0 (i.e. the default channel).
	 *
	 * @param topic topic to read from
	 * @param 回调函数 invoked once per received record
	 * @throws Exception declared for interface compatibility
	 */
	public void receiveSyn(String topic, IRun 回调函数) throws Exception {
		receiveSyn(topic, 0, 回调函数);
	}

	/**
	 * Receive loop with manual partition assignment (assign), preserving the
	 * order of a single partition. Offsets are committed synchronously after each
	 * non-empty batch when auto-commit is off.
	 *
	 * @param topic topic to read from
	 * @param 通道   partition to consume
	 * @param 回调函数 invoked once per received record
	 */
	public void receiveSyn(String topic, int 通道, IRun 回调函数) {
		_consumer.assign(Arrays.asList(new TopicPartition(topic, 通道)));
		try {
			while (_count.get() != 0) {
				// DUR bounds the poll so an unstable connection cannot block forever.
				ConsumerRecords<byte[], byte[]> records = _consumer.poll(DUR);
				for (ConsumerRecord<byte[], byte[]> record : records) {
					回调函数.run(record);
				}
				if (!records.isEmpty()) {
					commitSyn();
				}
				if (_count.get() > 0) {
					_count.decrementAndGet();
				}
			}
		} catch (WakeupException e) {
			// close() was called from another thread; exit the poll loop cleanly.
		}
	}

	/**
	 * Stops the receive loop and closes the producer and consumer.
	 */
	public void close() {
		_count.set(0);

		if (_producer != null) {
			_producer.flush(); // push any buffered records to the broker before closing
			_producer.close();
		}
		if (_consumer != null) {
			// wakeup() is the only thread-safe consumer method; it makes a blocked
			// poll() throw WakeupException so the receive loop can exit.
			// NOTE(review): closing here can still race with a loop that has not yet
			// observed the wakeup — ideally the polling thread closes its own consumer.
			_consumer.wakeup();
			_consumer.close();
		}
	}

	/**
	 * Asynchronously commits consumed offsets; no-op when auto-commit is enabled.
	 */
	public void commit() {
		if (!KfkConfig.isAutoCommit()) {
			_consumer.commitAsync();
		}
	}

	/**
	 * Synchronously commits consumed offsets; no-op when auto-commit is enabled.
	 */
	public void commitSyn() {
		if (!KfkConfig.isAutoCommit()) {
			_consumer.commitSync();
		}
	}
}
// References:
// https://blog.csdn.net/m0_37739193/article/details/78396773
// https://www.w3cschool.cn/apache_kafka/apache_kafka_simple_producer_example.html
