package com.kafka.consumer;

import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

import java.io.*;
import java.util.*;

/*
 * Kafka consumer with custom offset storage: offsets are cached in memory and
 * persisted to a serialized file instead of Kafka's __consumer_offsets topic.
 */

public class ComsumerManual {

	// In-memory cache: for each partition, the offset of the NEXT record to consume.
	private static Map<TopicPartition, Long> offset = new HashMap<TopicPartition, Long>();

	// Path of the file used as the custom offset store.
	// NOTE(review): getResource returns null if "/offset" is missing from the
	// classpath, which would throw an NPE here — confirm the resource exists.
	private static String file = ComsumerManual.class.getResource("/offset").getPath();

	public static void main(String[] args) throws IOException {

		// 1. Build the consumer from consumer.properties on the classpath.
		Properties properties = new Properties();

		// BUGFIX: load via this class's loader, not an unrelated Consumer class.
		properties.load(ComsumerManual.class.getClassLoader().getResourceAsStream("consumer.properties"));

		KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(properties);

		// 2. Subscribe and hook into rebalances so offsets survive partition moves.
		consumer.subscribe(Collections.singleton("hello2"), new ConsumerRebalanceListener() {

			// Called before partitions are revoked: persist what we have consumed so far.
			@Override
			public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
				commit();
			}

			// Called after partitions are assigned: restore saved positions.
			@Override
			public void onPartitionsAssigned(Collection<TopicPartition> partitions) {

				// Pull the saved offsets for our new assignment into the cache.
				readOffset(partitions);

				for (TopicPartition partition : partitions) {
					Long os = offset.get(partition);
					if (os == null) {
						// No saved position: first consumer of this partition,
						// start from the beginning.
						consumer.seek(partition, 0);
					} else {
						consumer.seek(partition, os);
					}
				}
			}
		});

		// 3. Poll loop: consume, cache the next offset, then persist.
		while (true) {

			ConsumerRecords<String, String> records = consumer.poll(2000);

			// Consume and commit as one unit so the persisted offsets always
			// reflect what was actually processed.
			{
				for (ConsumerRecord<String, String> record : records) {

					// "Process" the record.
					System.out.println(record);

					// BUGFIX: cache offset + 1 (the NEXT record to read).
					// Caching record.offset() would re-consume the last record
					// after a rebalance/restart, because seek() positions the
					// consumer AT the given offset, not after it.
					offset.put(new TopicPartition(record.topic(), record.partition()), record.offset() + 1);
				}

				commit();
			}
		}
	}

	/*
	 * Reads the full offset map from the custom store. Returns an empty map if
	 * the file is missing or unreadable so callers can proceed from scratch.
	 */
	@SuppressWarnings("unchecked") // the file is only ever written by commit() below
	private static Map<TopicPartition, Long> readOffsetFile() {
		// try-with-resources closes the stream on every path (fixes the manual
		// finally-close boilerplate duplicated in the original commit/readOffset).
		try (ObjectInputStream in = new ObjectInputStream(new FileInputStream(file))) {
			return (Map<TopicPartition, Long>) in.readObject();
		} catch (Exception e) {
			e.printStackTrace();
			return new HashMap<TopicPartition, Long>();
		}
	}

	/*
	 * Merges the in-memory offsets into the custom store and writes it back.
	 */
	private static void commit() {

		// 1. Read the previously saved offsets (empty map if none).
		Map<TopicPartition, Long> temp = readOffsetFile();

		// 2. Overlay our in-memory offsets on top of the saved ones.
		temp.putAll(offset);

		// 3. Write the merged map back out.
		try (ObjectOutputStream out = new ObjectOutputStream(new FileOutputStream(file))) {
			out.writeObject(temp);
		} catch (IOException e) {
			e.printStackTrace();
		}
	}

	/*
	 * Loads the saved offsets for the given (newly assigned) partitions into
	 * the in-memory cache.
	 */
	private static void readOffset(Collection<TopicPartition> partitions) {

		Map<TopicPartition, Long> temp = readOffsetFile();

		// Copy only the partitions we were assigned. Skip partitions with no
		// saved offset instead of storing null (the original clobbered valid
		// cached offsets with null when the file read failed, forcing a full
		// re-consume from offset 0).
		for (TopicPartition partition : partitions) {
			Long saved = temp.get(partition);
			if (saved != null) {
				offset.put(partition, saved);
			}
		}
	}

}
