package com.roy.KafkaTest.stream;

import java.util.Locale;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.processor.Processor;
import org.apache.kafka.streams.processor.ProcessorContext;
import org.apache.kafka.streams.processor.ProcessorSupplier;
import org.apache.kafka.streams.processor.PunctuationType;
import org.apache.kafka.streams.state.KeyValueIterator;
import org.apache.kafka.streams.state.KeyValueStore;
import org.apache.kafka.streams.state.StoreBuilder;
import org.apache.kafka.streams.state.Stores;

public class WordCountProcessorDemo {
	/**
	 * Supplies a low-level {@link Processor} that counts word occurrences per
	 * line, keeping running totals in the "Counts" key-value state store and
	 * periodically forwarding every (word, count) pair downstream.
	 */
	static class MyProcessorSupplier
			implements
				ProcessorSupplier<String, String> {

		@Override
		public Processor<String, String> get() {
			return new Processor<String, String>() {
				private ProcessorContext context;
				private KeyValueStore<String, Integer> kvStore;

				@Override
				@SuppressWarnings("unchecked")
				public void init(final ProcessorContext context) {
					this.context = context;
					// Every 1000 ms of stream time, dump the current counts to
					// stdout and forward each entry downstream. NOTE(review):
					// the long-interval overload of schedule() is deprecated
					// since Kafka 2.1 (removed in 3.0) — migrate to the
					// Duration overload when upgrading.
					this.context.schedule(1000,
							PunctuationType.STREAM_TIME, timestamp -> {
								// try-with-resources: store iterators must be
								// closed to release underlying resources.
								try (final KeyValueIterator<String, Integer> iter = kvStore
										.all()) {
									System.out.println("----------- "
											+ timestamp + " ----------- ");

									while (iter.hasNext()) {
										final KeyValue<String, Integer> entry = iter
												.next();

										System.out.println("[" + entry.key
												+ ", " + entry.value + "]");

										// The sink uses the default String
										// serde, so forward the count as text.
										context.forward(entry.key,
												entry.value.toString());
									}
								}
							});
					this.kvStore = (KeyValueStore<String, Integer>) context
							.getStateStore("Counts");
				}

				@Override
				public void process(final String dummy, final String line) {
					// Locale.ROOT makes lower-casing locale-independent (the
					// default locale mis-handles e.g. the Turkish dotless i).
					// \s+ collapses runs of whitespace; split(" ") would emit
					// empty tokens for consecutive spaces and count "" as a
					// word.
					final String[] words = line.toLowerCase(Locale.ROOT)
							.split("\\s+");

					for (final String word : words) {
						if (word.isEmpty()) {
							// Leading whitespace yields one empty token.
							continue;
						}
						final Integer oldValue = this.kvStore.get(word);
						this.kvStore.put(word,
								oldValue == null ? 1 : oldValue + 1);
					}

					// Request a commit after every record. Acceptable for a
					// demo, but expensive in production — normally rely on
					// commit.interval.ms instead.
					context.commit();
				}

				@Override
				public void close() {
					// Nothing to release: the state store is owned and closed
					// by the Streams runtime.
				}
			};
		}
	}

	/**
	 * Wires the topology (source -&gt; processor -&gt; sink), starts the Streams
	 * application, and blocks until the JVM shuts down (e.g. Ctrl-C).
	 */
	public static void main(final String[] args) {
		final Properties props = new Properties();
		props.put(StreamsConfig.APPLICATION_ID_CONFIG,
				"streams-wordcount-processor");
		props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
		// Disable record caching so every count update is forwarded
		// immediately instead of being buffered.
		props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
		props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG,
				Serdes.String().getClass());
		props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG,
				Serdes.String().getClass());

		// setting offset reset to earliest so that we can re-run the demo code
		// with the same pre-loaded data
		props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
		// Local directory for persisting state stores (only needed when using
		// a persistent store; see the commented-out alternative below).
//		props.put(StreamsConfig.STATE_DIR_CONFIG, "D:\\my-kfkStream-state");

		final Topology builder = new Topology();

		builder.addSource("Source", "streams-plaintext-input");

		builder.addProcessor("Process", new MyProcessorSupplier(), "Source");
		// In-memory state store named "Counts", attached to the "Process"
		// node so the processor can look it up in init().
		builder.addStateStore(Stores.keyValueStoreBuilder(
				Stores.inMemoryKeyValueStore("Counts"), Serdes.String(),
				Serdes.Integer()), "Process");
		// Alternative: a persistent state store backed by a local RocksDB
		// replica. RocksDB writes are very fast, but performance degrades for
		// large values and very large data sets. A persistent store requires
		// StreamsConfig.STATE_DIR_CONFIG to be set (see above).
//		StoreBuilder<KeyValueStore<String, String>> myStateStore = Stores.keyValueStoreBuilder(Stores.persistentKeyValueStore("my_state_store"),
//	                Serdes.String(),Serdes.String()).withCachingEnabled();
//		builder.addStateStore(myStateStore,"Process");

		builder.addSink("Sink", "streams-wordcount-processor-output",
				"Process");

		final KafkaStreams streams = new KafkaStreams(builder, props);
		final CountDownLatch latch = new CountDownLatch(1);

		// attach shutdown handler to catch control-c
		Runtime.getRuntime()
				.addShutdownHook(new Thread("streams-wordcount-shutdown-hook") {
					@Override
					public void run() {
						streams.close();
						latch.countDown();
					}
				});

		try {
			streams.start();
			latch.await();
		} catch (final Throwable e) {
			// Surface the failure before exiting — the previous version
			// swallowed the exception, making startup errors undiagnosable.
			e.printStackTrace();
			System.exit(1);
		}
		System.exit(0);
	}
}
