package com.company.messagequeue.kafka.consumer;

import com.company.messagequeue.kafka.constant.KafkaConstant;
import com.company.messagequeue.message.CommonMessage;
import com.company.messagequeue.warning.WarningUtil;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.log4j.Logger;

import java.text.ParseException;
import java.time.Duration;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

/**
 * Title: KafkaConsumerClient<br>
 * Description: Kafka consumer client — subscribes to a topic and dispatches each message to a
 * listener, with manual offset commits and a bounded retry on consume failure.<br>
 * CreateDate:2020年12月04日 19:13
 *
 */
public class KafkaConsumerClient {
  private static final Logger logger = Logger.getLogger(KafkaConsumerClient.class);

  /**
   * Shared thread pool, used both for the long-running poll loops started by
   * {@link #subscribe} and for the short-lived retry tasks spawned on consume failure.
   */
  private static volatile ThreadPoolExecutor threadPool;

  /**
   * Lazily creates the shared thread pool using double-checked locking on the
   * volatile {@link #threadPool} field.
   *
   * <p>NOTE(review): the work queue holds only 4 tasks and the rejection policy is
   * {@code DiscardOldestPolicy}, so under load a queued task — possibly an entire
   * subscription loop — can be silently dropped. Kept as-is to preserve existing
   * behavior; confirm the sizing is intentional.
   *
   * @return the lazily-initialized singleton executor
   */
  private static ThreadPoolExecutor getThreadPool() {
    if (threadPool == null) {
      synchronized (KafkaConsumerClient.class) {
        if (threadPool == null) {
          threadPool = new ThreadPoolExecutor(10, 20, 30, TimeUnit.SECONDS,
              new ArrayBlockingQueue<Runnable>(4),
              new ThreadPoolExecutor.DiscardOldestPolicy());
        }
      }
    }
    return threadPool;
  }

  /**
   * Subscribes to a topic and runs an endless poll loop on the shared thread pool.
   * Each record is deserialized into a {@link CommonMessage} and handed to the
   * listener; its offset is then committed manually (one record at a time) so a
   * crash mid-batch cannot lose more than the in-flight record. If the listener
   * reports failure, the message is retried asynchronously (see {@link #retryConsume}).
   *
   * @param queueName            topic to subscribe to
   * @param groupId              Kafka consumer group id
   * @param kafkaMessageListener callback that processes each message; returns
   *                             {@code true} on success
   * @param serverAddress        Kafka bootstrap servers
   */
  public static void subscribe(String queueName, String groupId,
      KafkaMessageListener kafkaMessageListener, String serverAddress) {
    getThreadPool().execute(() -> {
      KafkaConsumer<String, String> consumer = structureConsumer(groupId, serverAddress);
      consumer.subscribe(Arrays.asList(queueName));
      while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));

        for (ConsumerRecord<String, String> record : records) {
          // Deserialize the raw String payload.
          CommonMessage message = null;
          try {
            message = CommonMessage.strToMessage(record.value());
            if (null != message) {
              boolean consumerRes = kafkaMessageListener.consume(message);
              // On failure, retry asynchronously up to 3 times; if that also fails a
              // warning is sent so the offset can be handled manually (the problem may
              // be in the program or in the data).
              if (!consumerRes) {
                retryConsume(kafkaMessageListener, message);
              }
            }
          } catch (ParseException e) {
            logger.error("kafka--->消费者反序列化数据失败message:" + message, e);
            WarningUtil.sendWarning("kafka--->消费者反序列化数据失败message:" + message + ",Exception" + e);
          } catch (Exception e) {
            logger.error("kafka--->消费者处理失败message:" + message, e);
            WarningUtil.sendWarning("kafka--->消费者处理失败message:" + message + ",Exception" + e);
          }

          // Manual per-record commit: auto-commit is disabled so a crash while a
          // polled batch is being processed cannot silently drop messages. Note the
          // offset is committed even when processing failed — the retry/warning path
          // above is responsible for those messages.
          Map<TopicPartition, OffsetAndMetadata> commitMap = new HashMap<>();
          commitMap.put(
              new TopicPartition(record.topic(), record.partition()),
              new OffsetAndMetadata(record.offset() + 1));
          // Log (instead of silently dropping) any async commit failure.
          consumer.commitAsync(commitMap, (offsets, exception) -> {
            if (exception != null) {
              logger.error("kafka--->offset commit failed:" + offsets, exception);
            }
          });
        }
      }
    });
  }

  /**
   * Retries a failed message on the shared pool: up to 3 attempts, sleeping 5
   * seconds before each one. Stops on the first success or on interruption;
   * sends a warning if the final attempt also fails.
   *
   * @param kafkaMessageListener listener to re-invoke
   * @param message              the message whose first delivery failed
   */
  private static void retryConsume(KafkaMessageListener kafkaMessageListener,
      CommonMessage message) {
    getThreadPool().execute(() -> {
      for (int i = 0; i < 3; i++) {
        logger.error("kafka--->消费者重试处理次数：" + i);
        try {
          // Back off before each retry.
          Thread.sleep(5000);
          if (kafkaMessageListener.consume(message)) {
            // Success — stop retrying.
            logger.info("kafka--->消费者重试处理次数：" + i + "成功。");
            return;
          }
        } catch (InterruptedException e) {
          // Restore the interrupt flag and stop retrying instead of spinning on.
          Thread.currentThread().interrupt();
          logger.error("kafka--->消费者处理失败message:" + message, e);
          WarningUtil.sendWarning("kafka--->消费者处理失败message:" + message + ",Exception" + e);
          return;
        }
        logger.error("kafka--->消费者重试处理次数：" + i + "失败。");
        // All attempts exhausted — escalate.
        if (i == 2) {
          WarningUtil.sendWarning("kafka--->消费者重试处理次数：" + i + "失败。" + "message" + message);
        }
      }
    });
  }

  /**
   * Convenience overload of {@link #subscribe(String, String, KafkaMessageListener, String)}
   * using the default server address {@link KafkaConstant#SERVER_ADDRESS}.
   *
   * @param queueName            topic to subscribe to
   * @param groupId              Kafka consumer group id
   * @param kafkaMessageListener callback that processes each message
   */
  public static void subscribe(String queueName, String groupId,
      KafkaMessageListener kafkaMessageListener) {
    subscribe(queueName, groupId, kafkaMessageListener, KafkaConstant.SERVER_ADDRESS);
  }

  /**
   * Builds a String/String {@link KafkaConsumer} with auto-commit disabled
   * (offsets are committed manually in the poll loop).
   *
   * @param groupId       Kafka consumer group id
   * @param serverAddress Kafka bootstrap servers
   * @return a new consumer instance (caller owns it; it is never closed by this class)
   */
  private static KafkaConsumer<String, String> structureConsumer(String groupId,
      String serverAddress) {
    Properties props = new Properties();
    props.put("bootstrap.servers", serverAddress);
    props.put("group.id", groupId);
    // Offsets are committed manually after each record is processed; note that
    // auto.commit.interval.ms is ignored by the broker when auto-commit is off,
    // so the previously-set interval was dead configuration and has been removed.
    props.put("enable.auto.commit", "false");
    props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    return new KafkaConsumer<>(props);
  }
}
