package com.holly.unit.kafka.impl;

import cn.hutool.core.util.StrUtil;
import com.holly.unit.kafka.KafkaApi;
import com.holly.unit.kafka.config.KafkaConfig;
import com.holly.unit.kafka.exception.KafkaException;
import com.holly.unit.kafka.exception.enums.KafkaExceptionEnum;
import com.holly.unit.kafka.model.*;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.TopicPartition;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.atomic.AtomicBoolean;



/**
 * Kafka wrapper implementation: sending messages (plain and transactional),
 * receiving messages, and topic-to-topic transformation.
 *
 * @author pengpeng
 */
@Slf4j
public class KafkaApiImpl implements KafkaApi {
	private  static  KafkaProducerPool kafkaProducerPool = null;
	//线程安全 确保配置初始化一次。 其实 @ConditionalOnMissingBean 这个注解已经保证线程安全了。
	private static volatile KafkaConfig kafkaConfig  = null;
	public KafkaApiImpl(KafkaConfig _kafkaConfig) {
		super();
		if(kafkaConfig == null){
			synchronized (this.getClass()) {
				if(kafkaConfig == null) {
					kafkaConfig = _kafkaConfig;
					kafkaProducerPool = new KafkaProducerPool(kafkaConfig);
				}
			}
		}
	}

	public KafkaApiImpl() {
		super();
		// TODO Auto-generated constructor stub
	}


	public void sendMessage(List<Kmessage> kmessageList) throws KafkaException {
		String tid = Long.toString(Thread.currentThread().getId());
		KafkaProducerPool.localVar.set(new KlocalThread(tid, KproducerType.PRODUCER_TYPE_NORMAL));
		// 2. 创建一个生产者对象KafkaProducer
		KafkaProducer<String, Object> kafkaProducer = kafkaProducerPool.getProducer(0);
		try {
			if(kmessageList== null || kmessageList.isEmpty() )
			{
				String format = StrUtil.format(KafkaExceptionEnum.KAFKA_MQ_SEND_ERROR.getUserTip(),"消息不能为空");
				throw new KafkaException(KafkaExceptionEnum.KAFKA_MQ_SEND_ERROR.getErrorCode(), format);
			}
			for(Kmessage kmsg : kmessageList)
			{
				ProducerRecord<String, Object> producerRecord = new ProducerRecord(kmsg.getTopic(), kmsg.getPartition(), kmsg.getKey(),kmsg.getVal());
				kafkaProducer.send(producerRecord, new Callback() {
					@Override
					public void onCompletion(RecordMetadata metadata, Exception exception) {
						// 1. 判断发送消息是否成功
						if(exception == null) {
							// 发送成功
							// 主题
							String topic = metadata.topic();
							// 分区id
							int partition = metadata.partition();
							// 偏移量
							long offset = metadata.offset();
						}
						else {
							log.error("发送消息失败:{}",exception);
							String format = StrUtil.format(KafkaExceptionEnum.KAFKA_MQ_SEND_ERROR.getUserTip(),metadata);
							throw new KafkaException(KafkaExceptionEnum.KAFKA_MQ_SEND_ERROR.getErrorCode(), format);
						}
					}
				});
			}
		} catch (Exception e) {
			log.error("发送消息失败:{}",e);
			String format = StrUtil.format(KafkaExceptionEnum.KAFKA_MQ_SEND_ERROR.getUserTip(),e.getMessage());
			throw new KafkaException(KafkaExceptionEnum.KAFKA_MQ_SEND_ERROR.getErrorCode(), format);
		}finally {
			kafkaProducerPool.freeProducer(kafkaProducer);
		}
	}
	/**
	 * 默认一批次拉取500条
	 * @param topic
	 * @return List<Kmessage> 集合
	 * @throws InterruptedException
	 */
	@Override
	public void receiveMessage(String topic,Integer partition,String groupId,CallbackReceiveMessage callback,boolean isAutoCommit) throws KafkaException {
		Properties props = new Properties();
		props.setProperty("bootstrap.servers", kafkaConfig.getBootstrapServers());
		// 消费者组（可以使用消费者组将若干个消费者组织到一起），共同消费Kafka中topic的数据
		// 每一个消费者需要指定一个消费者组，如果消费者的组名是一样的，表示这几个消费者是一个组中的
		props.setProperty("group.id", groupId);
		props.put("client.id", Thread.currentThread().getName());

		List<Kmessage> messageList = new ArrayList<>();
		// 自动提交offset
		props.setProperty("enable.auto.commit", "false");
		if(isAutoCommit)
		{
			props.setProperty("enable.auto.commit", "true");
			// 自动提交offset的时间间隔
			props.setProperty("auto.commit.interval.ms",kafkaConfig.getAutoCommitInterval());
		}
		// 拉取的key、value数据的
		props.setProperty("key.deserializer", kafkaConfig.keyDeserializer);
		props.setProperty("value.deserializer",kafkaConfig.valueDeserializer);
		//拉取策略
		props.setProperty("auto.offset.reset",kafkaConfig.autoOffsetReset);

		AtomicBoolean isNoStop = new AtomicBoolean(true);
		// 2.创建Kafka消费者
		KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(props);
		try {
			// 3. 订阅要消费的主题
			// 指定消费者从哪个topic中拉取数据
			if(partition == null)
			{
				kafkaConsumer.subscribe(Arrays.asList(topic));
			}else
			{
				ArrayList<TopicPartition> topicPartitions = new ArrayList<>();
				topicPartitions.add(new TopicPartition(topic,partition));
				kafkaConsumer.assign(topicPartitions);
			}
			while(isNoStop.get()) {
				messageList.clear();
				// Kafka的消费者一次拉取一批的数据
				ConsumerRecords<String, String> consumerRecords = kafkaConsumer.poll(Duration.ofSeconds(1));
				if(consumerRecords.isEmpty()){ continue;}
				// 5.将将记录（record）的offset、key、value都打印出来
				for (ConsumerRecord<String, String> consumerRecord : consumerRecords) {
					Object key = consumerRecord.key();
					Object value = consumerRecord.value();
					Kmessage message = new Kmessage();
					message.setTopic(consumerRecord.topic());
					message.setPartition(consumerRecord.partition());
					message.setKey(key);
					message.setOffset(consumerRecord.offset());
					message.setVal(value);
					messageList.add(message);
				}
				if(!isAutoCommit){
					callback.receiveMessage(messageList, kafkaConsumer,isNoStop);
				} else {
					callback.receiveMessage(messageList, null, isNoStop);
				}
			}
		} catch (Exception e) {
			log.error("获取全部topic失败:{}",e);
			String format = StrUtil.format(KafkaExceptionEnum.KAFKA_MQ_RECEIVE_ERROR.getUserTip(),e.getMessage());
			throw new KafkaException(KafkaExceptionEnum.KAFKA_MQ_RECEIVE_ERROR.getErrorCode(), format);
		}finally {
			kafkaConsumer.close();
		}
	}




	@SuppressWarnings("resource")
	@Override
	public void sendMessageWithTranactions(List<Kmessage> kmessageList) throws KafkaException{

		String tid = "holl_"+kafkaConfig.getTranactionalId();

		KafkaProducerPool.localVar.set(new KlocalThread(tid, KproducerType.PRODUCER_TYPE_TRANCTION));
		// 2. 创建一个生产者对象KafkaProducer
		KafkaProducer<String, Object> kafkaProducer = (KafkaProducer<String, Object>) kafkaProducerPool.getProducer(0);
		if(kmessageList== null || kmessageList.isEmpty() )
		{
			String format = StrUtil.format(KafkaExceptionEnum.KAFKA_MQ_SEND_ERROR.getUserTip(),"发送消息不能为空");
			throw new KafkaException(KafkaExceptionEnum.KAFKA_MQ_SEND_ERROR.getErrorCode(), format);
		}

		try {
			kafkaProducer.beginTransaction();

			for(Kmessage kmsg : kmessageList)
			{
				ProducerRecord<String, Object>  producerRecord = new ProducerRecord(kmsg.getTopic(), kmsg.getPartition(), kmsg.getKey(),kmsg.getVal());
				kafkaProducer.send(producerRecord);
			}
			kafkaProducer.commitTransaction();
		} catch (Exception e) {
			kafkaProducer.abortTransaction();
			log.error("事务发送数据:{}",e);
			kafkaProducer.close();
			String format = StrUtil.format(KafkaExceptionEnum.KAFKA_MQ_SEND_ERROR.getUserTip(),e.getMessage());
			throw new KafkaException(KafkaExceptionEnum.KAFKA_MQ_SEND_ERROR.getErrorCode(), format);
		}
	}

	/**
	 * 数据转换流 从Topic "from" 接收数据并逻辑处理数据，再发送到 Topic "to"
	 * @param groupId    消费者分配组
	 * @param tranactionId  事务id,如果为null，则为默认事务id
	 * @param fromTopic {"topic":"xx","partition":null}  partition可以为null
	 * @param toTopic   {"topic":"xx","partition":null}  partition可以为null
	 * @param transformationCallBack
	 */
	@Override
	public void topicTransformation(String groupId,String tranactionId,Ktopic fromTopic,Ktopic toTopic,CallbackReceiveMessage transformationCallBack)
	{
		KafkaConsumer<String, String> kafkaConsumer = null;
		try {
			if(tranactionId == null) tranactionId = kafkaConfig.tranactionalId;
			String tid = "holl_"+tranactionId;

			//producer
			KafkaProducerPool.localVar.set(new KlocalThread(tid, KproducerType.PRODUCER_TYPE_TRANCTION));
			KafkaProducer<String, Object> kafkaProducer = (KafkaProducer<String, Object>) kafkaProducerPool.getProducer(0);

			//consumer
			kafkaConsumer = makeConsumer(groupId, fromTopic);

			List<Kmessage> messageList = new ArrayList<>();

			AtomicBoolean isNoStop = new AtomicBoolean(true);

			// 3. 编写一个while死循环，在while循环中不断拉取数据，进行处理后，再写入到指定的topic
			while(isNoStop.get()) {
				try {
					messageList.clear();
					// (1)	生产者开启事务
					kafkaProducer.beginTransaction();

					// 这个Map保存了topic对应的partition的偏移量
					Map<TopicPartition, OffsetAndMetadata> offsetMap = new HashMap<>();

					// 从topic中拉取一批的数据
					// (2)	消费者拉取消息
					ConsumerRecords<String, String> concumserRecordArray = kafkaConsumer.poll(Duration.ofSeconds(5));
					// (3)	遍历拉取到的消息，并进行预处理
					for (ConsumerRecord<String, String> consumerRecord : concumserRecordArray) {
						try {
							Object key = consumerRecord.key();
							Object value = consumerRecord.value();
							long offset = consumerRecord.offset();
							Kmessage message = new Kmessage();
							message.setTopic(consumerRecord.topic());
							message.setPartition(consumerRecord.partition());
							message.setKey(key);
							message.setVal(value);
							messageList.add(message);

							// offset + 1：offset是当前消费的记录（消息）对应在partition中的offset，而我们希望下一次能继续从下一个消息消息
							// 必须要+1，从能消费下一条消息
							offsetMap.put(new TopicPartition(consumerRecord.topic(), consumerRecord.partition()), new OffsetAndMetadata(offset + 1));

							transformationCallBack.receiveMessage(messageList, kafkaConsumer, isNoStop);

							// (4)	生产消息到dwd_user topic中
							ProducerRecord<String, Object>  producerRecord = new ProducerRecord(toTopic.getTopic(), toTopic.getPartition(), message.getKey(),message.getVal());
							// 发送消息
							java.util.concurrent.Future<RecordMetadata> future = kafkaProducer.send(producerRecord);

							future.get();
						} catch (Exception e) {
							kafkaProducer.abortTransaction();
							throw new RuntimeException(e);
						}
					}
					kafkaProducer.sendOffsetsToTransaction(offsetMap, groupId);
					kafkaProducer.commitTransaction();
				}catch (Exception e) {
					throw  new RuntimeException(e);
				}
			}
		} catch (Exception e) {
			log.error("kafkaFromT2T:{}",e);
			String format = StrUtil.format(KafkaExceptionEnum.KAFKA_MQ_SEND_ERROR.getUserTip(),e.getMessage());
			throw new KafkaException(KafkaExceptionEnum.KAFKA_MQ_SEND_ERROR.getErrorCode(), format);
		}finally
		{
			kafkaProducerPool.removeProducer();
			if(kafkaConsumer != null)
			{
				kafkaConsumer.close();
				kafkaConsumer = null;
			}

		}

	}

	private KafkaConsumer<String, String> makeConsumer(String groupId, Ktopic fromTopic) {
		Properties props = new Properties();
		props.setProperty("bootstrap.servers", kafkaConfig.getBootstrapServers());
		// 消费者组（可以使用消费者组将若干个消费者组织到一起），共同消费Kafka中topic的数据
		// 每一个消费者需要指定一个消费者组，如果消费者的组名是一样的，表示这几个消费者是一个组中的
		props.setProperty("group.id", groupId);
		props.put("client.id", Thread.currentThread().getName());
		props.put("isolation.level","read_committed");

		// 自动提交offset
		props.setProperty("enable.auto.commit", "false");
		// 自动提交offset的时间间隔
		props.setProperty("auto.commit.interval.ms",kafkaConfig.getAutoCommitInterval());
		// 拉取的key、value数据的
		props.setProperty("key.deserializer", kafkaConfig.keyDeserializer);
		props.setProperty("value.deserializer",kafkaConfig.valueDeserializer);
		//拉取策略
		props.setProperty("auto.offset.reset",kafkaConfig.autoOffsetReset);
		KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(props);

		if(fromTopic.getPartition() == null)
		{
			kafkaConsumer.subscribe(Arrays.asList(fromTopic.getTopic()));
		}else
		{
			ArrayList<TopicPartition> topicPartitions = new ArrayList<>();
			topicPartitions.add(new TopicPartition(fromTopic.getTopic(),fromTopic.getPartition()));
			kafkaConsumer.assign(topicPartitions);
		}
		return kafkaConsumer;
	}

}
