package com.dituhui.tool.io;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.KStreamBuilder;
import org.apache.kafka.streams.kstream.ValueMapper;
import org.apache.log4j.Logger;

/**
 * Utility methods for working with Kafka: producing records to a topic,
 * consuming records (auto-commit or manually batched), and running a simple
 * Kafka Streams value-mapping job between two topics.
 * 
 * @author liweigu
 * 
 */
public class KafkaTool {
	private static final Logger LOGGER = Logger.getLogger(KafkaTool.class);
	// 超时时间，单位是ms
	private static final int POLL_TIMEOUT = 100;

	/**
	 * 提交数据
	 * 
	 * @param server 服务地址
	 * @param topic 主题
	 * @param data 数据内容
	 */
	public static void produce(String server, String topic, HashMap<String, String> data) {
		LOGGER.debug("produce start");
		Properties props = new Properties();
		props.put("bootstrap.servers", server);
		props.put("acks", "all");
		props.put("retries", 0);
		props.put("batch.size", 16384);
		props.put("linger.ms", 1);
		props.put("buffer.memory", 33554432);
		props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
		props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

		Producer<String, String> producer = new KafkaProducer<String, String>(props);
		for (String key : data.keySet()) {
			producer.send(new ProducerRecord<String, String>(topic, key, data.get(key)));
		}

		producer.close();
		LOGGER.debug("produce end");
	}

	/**
	 * 消费数据
	 * 
	 * @param server 服务地址
	 * @param topic 主题
	 * @param consumerRecordHandler 数据处理工具
	 * @param groupId 消费组分组id。如果传null，那么默认值是"test"。
	 * @param autoCommit 是否自动提交
	 * @param beginOffset 非自动提交时，是否设置起始索引
	 */
	public static void consume(String server, String topic, ConsumerRecordHandler consumerRecordHandler, String groupId, boolean autoCommit, Long beginOffset) {
		LOGGER.info("consume start. server = " + server + ", topic = " + topic + ", autoCommit = " + autoCommit + ", beginOffset = " + beginOffset);
		Properties props = new Properties();
		props.put("bootstrap.servers", server);
		if (groupId == null) {
			groupId = "test";
		}
		props.put("group.id", groupId);
		props.put("enable.auto.commit", "" + autoCommit);
		props.put("auto.commit.interval.ms", "1000");
		props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
		props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
		KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
		if (beginOffset != null) {
			// 从头读取
			TopicPartition partition0 = new TopicPartition(topic, 0);
			consumer.assign(Arrays.asList(partition0));
			// consumer.seekToBeginning(consumer.assignment());
			long offset = 0;
			if (beginOffset < 0) {
				Map<TopicPartition, Long> endOffsets = consumer.endOffsets(consumer.assignment());
				LOGGER.debug("endOffsets.size():" + endOffsets.size());
				for (TopicPartition partition : endOffsets.keySet()) {
					Long endOffset = endOffsets.get(partition);
					LOGGER.debug("endOffset=" + endOffset);
					offset = beginOffset + endOffset;
					if (offset < 0) {
						offset = 0L;
					}
					LOGGER.debug("offset=" + offset);
					// consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(offset + 1)));
				}
			}
			consumer.seek((TopicPartition) consumer.assignment().toArray()[0], offset);
		} else {
			// 订阅
			consumer.subscribe(Arrays.asList(topic));
			// consumer.subscribe(Arrays.asList(topic, "test"));
		}
		if (autoCommit) {
			// 自动管理索引
			while (true) {
				// LOGGER.debug("poll");
				try {
					ConsumerRecords<String, String> records = consumer.poll(POLL_TIMEOUT);
					List<ConsumerRecord<String, String>> buffer = new ArrayList<ConsumerRecord<String, String>>();
					for (ConsumerRecord<String, String> record : records) {
						buffer.add(record);
					}
					// LOGGER.debug("after poll");
					consumerRecordHandler.handle(buffer);
				} catch (Exception ex) {
					ex.printStackTrace();
					String expMessage = ex.getMessage();
					try {
						ByteArrayOutputStream buf = new java.io.ByteArrayOutputStream();
						ex.printStackTrace(new java.io.PrintWriter(buf, true));
						expMessage = buf.toString();
						buf.close();
					} catch (IOException ex1) {
						LOGGER.warn("解析ex发生异常。", ex1);
					}
					LOGGER.warn("KafkaTool.consume发生异常。msg=" + expMessage);
				}
			}
		} else {
			// 手动管理索引
			long minBatchSize = 1000;
			long maxTime = 1000;
			// 批量取数据
			List<ConsumerRecord<String, String>> buffer = new ArrayList<ConsumerRecord<String, String>>();
			long time = System.currentTimeMillis();
			while (true) {
				// LOGGER.debug("poll");
				ConsumerRecords<String, String> records = consumer.poll(POLL_TIMEOUT);
				for (ConsumerRecord<String, String> record : records) {
					buffer.add(record);
				}
				if (buffer.size() >= minBatchSize || (System.currentTimeMillis() - time >= maxTime)) {
					boolean needMoreData = consumerRecordHandler.handle(buffer);
					if (!needMoreData) {
						break;
					}
					// // 执行后才会更新offset
					// consumer.commitSync();
					// 恢复数据
					buffer.clear();
					time = System.currentTimeMillis();
				}
			}
		}
		LOGGER.warn("consume end");
	}

	/**
	 * 处理数据流
	 * 
	 * @param server 服务地址
	 * @param topic 主题
	 * @param newTopic 新主题
	 * @param valueMapper 值映射
	 */
	public static void stream(String server, String topic, String newTopic, ValueMapper<Object, Object> valueMapper) {
		LOGGER.debug("stream start");
		Map<String, Object> props = new HashMap<String, Object>();
		props.put(StreamsConfig.APPLICATION_ID_CONFIG, "my-stream-processing-application");
		props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, server);
		props.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
		props.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
		StreamsConfig config = new StreamsConfig(props);

		KStreamBuilder builder = new KStreamBuilder();

		builder.stream(topic).mapValues(valueMapper).to(newTopic);

		KafkaStreams streams = new KafkaStreams(builder, config);
		streams.start();
	}

	/**
	 * 数据处理工具接口
	 * 
	 * @author liweigu
	 * 
	 */
	public interface ConsumerRecordHandler {
		/**
		 * 数据处理
		 * 
		 * @param record 数据
		 * @return 是否还要持续取数据
		 */
		public boolean handle(List<ConsumerRecord<String, String>> records);
	}

	public static void main(String[] args) {
		String server = "192.168.10.251:9092";
		String topic = "ditu";
		if (args == null || args.length == 0) {
			server = "121.40.86.232:9092";
			server = "localhost:9092";
			topic = "GPS";
			args = new String[] { "consume3" };
		}
		ConsumerRecordHandler consumerRecordHandler = new ConsumerRecordHandler() {
			@Override
			public boolean handle(List<ConsumerRecord<String, String>> records) {
				for (ConsumerRecord<String, String> record : records) {
					System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
				}
				return true;
			}
		};
		if (args[0].equals("produce")) {
			HashMap<String, String> data = new HashMap<String, String>();
			for (int i = 0; i < 6; i++) {
				data.put("k" + i, "v" + i);
			}
			produce(server, topic, data);
		} else if (args[0].equals("consume")) {
			consume(server, topic, consumerRecordHandler, null, true, null);
		} else if (args[0].equals("consume2")) {
			consume(server, topic, consumerRecordHandler, null, false, null);
		} else if (args[0].equals("consume3")) {
			consume(server, topic, consumerRecordHandler, null, false, 0L);
		} else if (args[0].equals("consume4")) {
			String newTopic = topic + "New";
			consume(server, newTopic, consumerRecordHandler, null, false, 0L);
		} else if (args[0].equals("stream")) {
			ValueMapper<Object, Object> valueMapper = new ValueMapper<Object, Object>() {
				@Override
				public Object apply(Object arg0) {
					Object result;
					if (arg0 == null) {
						result = "size=0";
					} else {
						result = "size=" + arg0.toString().length();
					}
					return result;
				}
			};
			String newTopic = topic + "New";
			stream(server, topic, newTopic, valueMapper);
		}
	}
}
