package com.li.kafka.consumer;

import com.li.kafka.admin.AdminSample;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.logging.log4j.util.Strings;

import java.time.Duration;
import java.util.*;

public class ConsumerSample {
  /**
   * Lazily-created singleton consumer shared by all samples. NOTE: the auto-commit
   * flag is fixed by whichever sample calls {@link #getConsumer(String)} first.
   */
  private static KafkaConsumer<String, String> consumer;

  public static void main(String[] args) {
    // Uncomment exactly one sample to run it:
    //    ConsumerSample.commit("true");
    //    ConsumerSample.commit("false");
    //    ConsumerSample.commitWithPartition();
    //    ConsumerSample.commitWithPartition2();
    //    ConsumerSample.controlOffset();
    ConsumerSample.controlPause();
  }

  /** Prints one record's partition, offset, key and value to stdout. */
  private static void printRecord(ConsumerRecord<String, String> record) {
    System.out.printf(
        "partition=%d,offset=%d,key=%s,value=%s%n",
        record.partition(), record.offset(), record.key(), record.value());
  }

  /**
   * Synchronously commits (last consumed offset + 1) for a single partition.
   * The +1 follows the Kafka convention: the committed offset is the NEXT
   * offset the group should read.
   */
  private static void commitLastOffset(
      KafkaConsumer<String, String> consumer,
      TopicPartition partition,
      List<ConsumerRecord<String, String>> partitionRecords) {
    long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
    consumer.commitSync(
        Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1)));
  }

  /** Manual offset commit, handling each partition separately. */
  public static void commitWithPartition() {
    KafkaConsumer<String, String> consumer = ConsumerSample.getConsumer("false");

    Collection<String> topics = new ArrayList<String>();
    topics.add(AdminSample.TOPIC_NAME);
    // Subscribe to the topic; partition assignment is done by the group coordinator.
    consumer.subscribe(topics);

    while (true) {
      ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
      // Process each partition independently.
      for (TopicPartition partition : records.partitions()) {
        List<ConsumerRecord<String, String>> recordsPartition = records.records(partition);
        for (ConsumerRecord<String, String> record : recordsPartition) {
          printRecord(record);
        }
        // Commit once per partition AFTER processing its batch
        // (the original committed the same final offset once per record).
        commitLastOffset(consumer, partition, recordsPartition);
      }
    }
  }

  /** Manual partition assignment (no consumer-group rebalancing). */
  public static void commitWithPartition2() {
    KafkaConsumer<String, String> consumer = ConsumerSample.getConsumer("false");

    TopicPartition p0 = new TopicPartition(AdminSample.TOPIC_NAME, 0);

    consumer.assign(Arrays.asList(p0)); // Subscribe to a single partition only.

    while (true) {
      ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
      for (TopicPartition partition : records.partitions()) {
        List<ConsumerRecord<String, String>> recordsPartition = records.records(partition);
        for (ConsumerRecord<String, String> record : recordsPartition) {
          printRecord(record);
        }
        // One synchronous commit per partition batch.
        commitLastOffset(consumer, partition, recordsPartition);
      }
    }
  }

  /** Manually seeks to a starting offset, then commits offsets manually. */
  public static void controlOffset() {
    KafkaConsumer<String, String> consumer = ConsumerSample.getConsumer("false");

    TopicPartition p0 = new TopicPartition(AdminSample.TOPIC_NAME, 0);

    consumer.assign(Arrays.asList(p0)); // Subscribe to a single partition only.

    // Seek ONCE before polling. The original seeked inside the loop, which
    // rewound to offset 190 on every iteration and reprocessed the same
    // records forever, defeating the manual commit below.
    consumer.seek(p0, 190);

    while (true) {
      ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
      for (TopicPartition partition : records.partitions()) {
        List<ConsumerRecord<String, String>> recordsPartition = records.records(partition);
        for (ConsumerRecord<String, String> record : recordsPartition) {
          printRecord(record);
        }
        commitLastOffset(consumer, partition, recordsPartition);
      }
    }
  }

  /** Consumer flow control: pause/resume a partition to throttle consumption. */
  public static void controlPause() {
    KafkaConsumer<String, String> consumer = ConsumerSample.getConsumer("false");

    TopicPartition p0 = new TopicPartition(AdminSample.TOPIC_NAME, 0);
    TopicPartition p1 = new TopicPartition(AdminSample.TOPIC_NAME, 1);

    consumer.assign(Arrays.asList(p0, p1));
    long totalNum = 5;
    while (true) {
      ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));

      // Process each partition independently.
      for (TopicPartition partition : records.partitions()) {
        List<ConsumerRecord<String, String>> recordsPartition = records.records(partition);
        long count = 0;

        for (ConsumerRecord<String, String> record : recordsPartition) {
          printRecord(record);

          count++;
          // After totalNum records from partition 0, pause it (poll() stops
          // returning its records until resumed).
          if (record.partition() == 0 && count >= totalNum) {
            consumer.pause(Arrays.asList(p0));
          }
          // After the 8th record from partition 1, resume partition 0.
          if (record.partition() == 1 && count == 8) {
            consumer.resume(Arrays.asList(p0));
          }
        }
        // Commit once per partition batch, not once per record.
        commitLastOffset(consumer, partition, recordsPartition);
      }
    }
  }

  /**
   * Consumes the sample topic and, when auto-commit is disabled, commits
   * offsets asynchronously after each record.
   *
   * @param isAuto "true" to rely on auto-commit, "false" for manual commit
   */
  public static void commit(String isAuto) {
    KafkaConsumer<String, String> consumer = ConsumerSample.getConsumer(isAuto);
    Collection<String> topics = new ArrayList<String>();
    topics.add(AdminSample.TOPIC_NAME);

    consumer.subscribe(topics);

    while (true) {
      ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
      for (ConsumerRecord<String, String> record : records) {
        printRecord(record);

        // parseBoolean avoids the boxing of Boolean.valueOf.
        if (!Boolean.parseBoolean(isAuto)) {
          System.out.println("手动提交");
          consumer.commitAsync();
        }
      }
    }
  }

  /**
   * Returns the shared consumer, creating it on first use.
   *
   * @param isAuto value for {@code enable.auto.commit}; blank/null falls back to "true"
   */
  public static KafkaConsumer<String, String> getConsumer(String isAuto) {
    if (ConsumerSample.consumer == null) {
      Properties prop = new Properties();
      prop.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "1.15.61.173:9092");
      prop.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "con-2");
      // BUG FIX: the original compared with "Strings.EMPTY != isAuto", a
      // reference comparison that is effectively always true, so the "true"
      // default was never applied. Use a proper null/empty check instead.
      prop.setProperty(
          ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG,
          isAuto != null && !isAuto.isEmpty() ? isAuto : "true");
      // Auto-commit interval (default 5s) — only used when auto-commit is on.
      prop.setProperty(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "10000");
      // Deserializers for both key and value.
      prop.setProperty(
          ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
      prop.setProperty(
          ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

      ConsumerSample.consumer = new KafkaConsumer<String, String>(prop);
    }
    return ConsumerSample.consumer;
  }
}
