package io.growing.channel.kafka;

import com.google.common.util.concurrent.AbstractExecutionThreadService;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.Message;
import com.google.protobuf.TextFormat;
import com.lmax.disruptor.dsl.Disruptor;
import io.growing.channel.EventTranslator;
import io.growing.channel.ProtobufMessages;
import io.growing.channel.config.ChannelConfig;
import io.growing.channel.config.DelayQueueConfig;
import io.growing.channel.constants.ChannelConstants;
import io.growing.channel.kafka.deserializer.DelayMessageDeserializer;
import io.growing.channel.model.ChannelEvent;
import io.growing.channel.model.DelayMessage;
import io.growing.channel.model.MessageMeta;
import java.time.Duration;
import java.util.*;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import lombok.AllArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

@Slf4j
@Slf4j
public final class DelayMessageConsumer extends AbstractExecutionThreadService {

	// Resume all paused partitions every 10 minutes as a safety net, so no
	// partition stays paused forever if a fine-grained resume is missed.
	private static final int RESUME_PERIOD = 10;

	private KafkaConsumer<String, DelayMessage> consumer;

	// Set by the scheduler thread, read and cleared by the poll loop. Only the
	// flag crosses threads; the KafkaConsumer itself is touched exclusively by
	// the poll thread (KafkaConsumer is not thread-safe).
	private final AtomicBoolean resumeFlag = new AtomicBoolean(false);

	private final Disruptor<ChannelEvent> disruptor;

	// Delay-topic name -> its delay-level configuration.
	private final Map<String, DelayQueueConfig.DelayLevel> topicMap = new HashMap<>();

	// Per-partition offset of the NEXT record to consume, pending commit.
	private final Map<TopicPartition, OffsetAndMetadata> currentOffsets = new HashMap<>();

	// Partitions whose remaining records in the current poll should be skipped
	// (the ring buffer rejected a publish); cleared after every loop iteration.
	private final Set<TopicPartition> skipPartition = new HashSet<>();

	private final ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(1,
			new ThreadFactoryBuilder().setNameFormat("delay consumer listener").build());

	public DelayMessageConsumer(Disruptor<ChannelEvent> disruptor) {
		this.disruptor = disruptor;
	}

	/**
	 * Builds the consumer, subscribes to every configured delay topic and starts
	 * the periodic task that requests resuming paused partitions.
	 */
	@Override
	protected void startUp() {
		Properties props = initAndGetConsumerProperties();

		consumer = new KafkaConsumer<>(props, new StringDeserializer(), new DelayMessageDeserializer());

		DelayQueueConfig.delayLevelMap.values().forEach(l -> topicMap.put(l.getTopic(), l));

		consumer.subscribe(topicMap.keySet(), new SaveOffsetsOnRebalance(consumer, currentOffsets));

		// Periodic task that asks the poll loop to resume all paused partitions.
		// It only flips a flag — the actual resume happens on the poll thread.
		executor.scheduleAtFixedRate(() -> resetResumeFlag(RESUME_PERIOD), RESUME_PERIOD, RESUME_PERIOD,
				TimeUnit.MINUTES);

		log.info("Kafka consumer subscribes:{} ", topicMap.keySet());
	}

	/**
	 * Main poll loop: records whose delay has elapsed are published to the
	 * disruptor and their offsets staged for commit; records still within their
	 * delay window are left uncommitted and their partition is paused until the
	 * delay elapses (or the periodic resume fires).
	 */
	@Override
	protected void run() {

		Set<TopicPartition> needPausePartitions = new HashSet<>();
		long logtime = 0;
		while (isRunning()) {
			// Heartbeat log roughly every 600 iterations to avoid log spam.
			if (logtime++ % 600 == 0) {
				log.info("poll from delay queue: {}", consumer.subscription());
			}
			try {
				ConsumerRecords<String, DelayMessage> records = consumer
						.poll(Duration.ofMillis(ChannelConfig.KAFKA_POLL_DURATION));
				long nowTime = System.currentTimeMillis();
				records.forEach(m -> {
					DelayQueueConfig.DelayLevel delayLevel = topicMap.get(m.topic());
					TopicPartition topicPartition = new TopicPartition(m.topic(), m.partition());
					MessageMeta msgMeta = m.value().getMsgMeta();
					long timestamp = msgMeta == null ? 0 : msgMeta.getTimestamp();
					long shouldConsumeTime = delayLevel.getDelayTime() + timestamp;
					long needDelayMinutes = (shouldConsumeTime - nowTime) / 1000 / 60;
					log.info("msgmeata timestamp:{},delayTime:{},needDelayMinutes:{}", (timestamp / 1000 / 60),
							(delayLevel.getDelayTime() / 1000 / 60), needDelayMinutes);
					if (needDelayMinutes <= 0 && processMessage(topicPartition, m.value())) {
						// The minute granularity (/1000/60 above) means a message may be
						// consumed 1-59 seconds early; that is acceptable here.
						log.info("consume delay message from topic:{}", m.topic());
						// Process first, commit after. Downstream handling is
						// idempotent, so occasional re-consumption is harmless.
						// The committed offset points to the NEXT record.
						currentOffsets.put(topicPartition, new OffsetAndMetadata(m.offset() + 1, ""));
					} else if (needDelayMinutes > 0) {
						// Not due yet: leave the offset uncommitted so the record is
						// re-delivered later, and pause the partition to yield
						// resources to the other partitions.
						needPausePartitions.add(topicPartition);
						// Schedule a resume once the remaining delay has elapsed,
						// unless the periodic RESUME_PERIOD task will fire sooner.
						if (needDelayMinutes < RESUME_PERIOD) {
							executor.schedule(() -> resetResumeFlag(needDelayMinutes), needDelayMinutes,
									TimeUnit.MINUTES);
						}
					}
				});
			} catch (Exception e) {
				log.error("consumer poll message error", e);
			}
			// Commit staged offsets.
			commitOffset();
			// Resume paused partitions when a resume was requested.
			resumePartitionIfNecessary();
			// Pause partitions whose records are not due yet.
			pausePartitionsIfNecessary(needPausePartitions);

			resetSkipPartition();
		}
	}

	/** Requests that the poll loop resume all paused partitions. Runs on the scheduler thread. */
	private void resetResumeFlag(long delayTime) {
		log.info("reset resume flag from {} after {} minute", resumeFlag.get(), delayTime);
		resumeFlag.set(true);
	}

	/** Clears the per-iteration set of partitions being skipped. */
	private void resetSkipPartition() {
		skipPartition.clear();
	}

	/**
	 * Parses the payload and publishes it to the disruptor ring buffer.
	 *
	 * @return true if the record is done with (published, unparseable, or of an
	 *         unknown business type) and its offset may be committed; false if
	 *         the ring buffer was busy or the partition is being skipped, so the
	 *         record must be retried later.
	 */
	private boolean processMessage(TopicPartition topicPartition, DelayMessage delayMessage) {
		if (skipPartition.contains(topicPartition)) {
			return false;
		}
		Message message = null;
		try {
			message = parseMessage(delayMessage);
		} catch (Exception e) {
			// Poison message: commit past it rather than blocking the partition.
			log.error("parseMessage error", e);
			return true;
		}
		if (message == null) {
			// Unknown business type (already logged by parseMessage). Commit past
			// it — publishing null to the ring buffer would NPE downstream.
			return true;
		}
		boolean publish = disruptor.getRingBuffer().tryPublishEvent(new EventTranslator(), message,
				System.currentTimeMillis());
		if (!publish) {
			log.info("RingBuffer is busy,give up publish delay message:{}", delayMessage.getMsgMeta());
			skipPartition.add(topicPartition);
		} else {
			log.info("Kafka is consuming from delay queue{}:{}", delayMessage.getMsgMeta().getRetryTime(), TextFormat.printToUnicodeString(message));

		}
		return publish;
	}

	/**
	 * Decodes the raw payload into the protobuf message matching its business
	 * type, or null for an unknown type.
	 *
	 * @throws InvalidProtocolBufferException if the payload bytes are corrupt
	 */
	private Message parseMessage(DelayMessage delayMessage) throws InvalidProtocolBufferException {
		switch (delayMessage.getBusinessType()) {
			case ChannelConstants.WX :
				return ProtobufMessages.WxMessage.parseFrom(delayMessage.getMessage());
			case ChannelConstants.WEBHOOK :
				return ProtobufMessages.WebhookMessage.parseFrom(delayMessage.getMessage());
			case ChannelConstants.PUSH :
				return ProtobufMessages.PushMessage.parseFrom(delayMessage.getMessage());
			case ChannelConstants.SMS :
				return ProtobufMessages.SmsMessage.parseFrom(delayMessage.getMessage());
			case ChannelConstants.EMAIL :
				return ProtobufMessages.EmailMessage.parseFrom(delayMessage.getMessage());
			default :
				log.error("not process business type:{}", delayMessage.getBusinessType());
				return null;
		}
	}

	/** Pauses the given partitions (if any) and clears the set for the next iteration. */
	private void pausePartitionsIfNecessary(Set<TopicPartition> needPausePartitions) {
		try {
			if (!needPausePartitions.isEmpty()) {
				log.info("pause partition:{}", needPausePartitions);
				consumer.pause(needPausePartitions);
				needPausePartitions.clear();
			}
		} catch (Exception e) {
			log.error("pause partition error", e);
		}
	}

	/**
	 * Synchronously commits staged offsets, then seeks each partition back to
	 * its committed offset so records that were left uncommitted (still within
	 * their delay window) are re-delivered on the next poll.
	 */
	private void commitOffset() {

		if (!currentOffsets.isEmpty()) {
			try {
				// commitSync retries until success or a fatal error. This is a
				// low-priority compensation queue, so blocking here is acceptable.
				consumer.commitSync(currentOffsets);
				currentOffsets.forEach((k, v) -> consumer.seek(k, v.offset()));
			} catch (Exception e) {
				log.error("commit offset error", e);
			}
		}
	}

	/** Resumes all paused partitions when a resume has been requested. Runs on the poll thread. */
	private void resumePartitionIfNecessary() {
		try {
			// Must run on the poll thread, not inside the scheduled task:
			// KafkaConsumer is single-threaded and calling it from the scheduler
			// thread throws ConcurrentModificationException ("KafkaConsumer is
			// not safe for multi-threaded access").
			Set<TopicPartition> paused = null;
			if (resumeFlag.get() && !(paused = consumer.paused()).isEmpty()) {
				log.info("resume partition:{}", paused);
				consumer.resume(paused);
				resumeFlag.set(false);
			}
		} catch (Exception e) {
			log.error("resume partition error", e);
		}
	}

	/** Builds the consumer configuration. Offsets are committed manually (see commitOffset). */
	private Properties initAndGetConsumerProperties() {
		Properties props = new Properties();

		props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, ChannelConfig.KAFKA_BROKERS);
		props.put(ConsumerConfig.GROUP_ID_CONFIG, "channel-consumer");

		// Disable auto-commit: offsets are committed only after a message has
		// actually been handed to the disruptor (or deliberately skipped).
		props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
		// With no committed offset, start from the earliest record.
		props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
		// Pull only 10 records per poll to yield resources to the main queue.
		props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 10);
		// Session timeout before the broker triggers a rebalance.
		props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 10000);
		// Max time between polls before a rebalance is triggered.
		props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 10000);

		return props;
	}

	/** Closes the consumer and stops the resume scheduler. */
	@Override
	protected void shutDown() {
		log.info("consumer is closing");
		// Stop the resume scheduler so its thread does not outlive the service.
		executor.shutdownNow();
		consumer.close();
	}

	/**
	 * Rebalance listener that keeps {@code currentOffsets} in sync with the
	 * partitions currently assigned to this consumer.
	 */
	@AllArgsConstructor
	static class SaveOffsetsOnRebalance implements ConsumerRebalanceListener {

		private final KafkaConsumer<String, DelayMessage> consumer;

		private final Map<TopicPartition, OffsetAndMetadata> currentOffsets;

		@Override
		public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
			log.info("被撤销了partition:{}", partitions);
			// Drop staged offsets for partitions we no longer own; the new owner
			// will resume from the last committed offset.
			partitions.forEach(currentOffsets::remove);
		}

		@Override
		public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
			log.info("被分配了partition:{}", partitions);
			// Seed staged offsets with the current position of each new partition.
			partitions.forEach(p -> {
				try {
					long position = consumer.position(p);
					currentOffsets.put(p, new OffsetAndMetadata(position, ""));
				} catch (Exception e) {
					log.error("init offset error", e);
				}
			});
			log.info("init offset:{}", currentOffsets);
		}
	}
}
