package com.roy.KafkaTest.stream.windowed;

import java.text.SimpleDateFormat;
import java.time.Instant;
import java.util.Date;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.TimeWindowedKStream;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.apache.kafka.streams.kstream.Windowed;

/**
 * Demo of a hopping-time-window count over a Kafka topic.
 *
 * <p>Consumes String records from {@code topic1}, counts records per key in
 * 5-second windows advancing every 1 second, drops windows that closed before
 * this process started, and prints each remaining windowed count with
 * human-readable window bounds.
 */
public class MyWindowStreamDemo1 {

	/** Comma-separated Kafka broker list this streams app connects to. */
	private static final String BOOTSTRAP_SERVERS = "172.16.48.10:9092,172.16.48.11:9092,172.16.48.12:9092";
	private static final String TOPIC = "topic1";

	private static final long WINDOW_SIZE_MS = 5000;    // hopping-window size: 5 seconds
	private static final long WINDOW_ADVANCE_MS = 1000; // window advance (hop): 1 second

	public static void main(String[] args) {
		Properties props = new Properties();
		props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-ljf-test");
		props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, BOOTSTRAP_SERVERS);
		props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
		props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
		// The default commit interval is 30s, which delays emission of the
		// windowed aggregates; shorten it so results are forwarded promptly.
		props.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, "500");

		StreamsBuilder builder = new StreamsBuilder();
		KStream<String, String> data = builder.stream(TOPIC);

		// Application start time; windows that already ended before this instant
		// belong to replayed history and are filtered out below.
		final long initTime = System.currentTimeMillis();

		// NOTE(review): TimeWindows.of(long)/advanceBy(long) are deprecated since
		// Kafka 2.1 in favor of the java.time.Duration overloads; kept here for
		// compatibility with the client version this project builds against.
		TimeWindowedKStream<String, String> windowedStream = data.groupByKey()
				.windowedBy(TimeWindows.of(WINDOW_SIZE_MS)
						.advanceBy(WINDOW_ADVANCE_MS));
		// Count records per key per window; the result is a KTable keyed by window.
		KTable<Windowed<String>, Long> count = windowedStream.count();

		count.toStream()
				.filterNot((windowedKey, value) -> isOldWindow(windowedKey, value, initTime))
				.foreach(MyWindowStreamDemo1::dealWithTimeWindowAggrValue);

		Topology topo = builder.build();
		KafkaStreams streams = new KafkaStreams(topo, props);

		// Keep the JVM alive until it is asked to shut down, then close the
		// streams app cleanly (flush state, commit offsets, leave the group).
		final CountDownLatch latch = new CountDownLatch(1);
		Runtime.getRuntime().addShutdownHook(new Thread(() -> {
			streams.close();
			latch.countDown();
		}, "streams-shutdown-hook"));

		streams.start();
		try {
			latch.await();
		} catch (InterruptedException e) {
			Thread.currentThread().interrupt();
		}
	}

	/**
	 * Returns {@code true} when the window ended before this application
	 * instance started, i.e. the aggregate is for pre-startup (replayed) data.
	 *
	 * @param windowKey the windowed record key
	 * @param value     the aggregate value (unused; present to match the
	 *                  {@code filterNot} predicate shape)
	 * @param initTime  application start time, epoch millis
	 */
	private static boolean isOldWindow(Windowed<String> windowKey, Long value, long initTime) {
		return windowKey.window().end() < initTime;
	}

	/** Handles one windowed aggregation result (demo: print it to stdout). */
	private static void dealWithTimeWindowAggrValue(Windowed<String> key, Long value) {
		Windowed<String> windowed = getReadableWindowed(key);
		System.out.println("处理聚合结果：key=" + windowed + ",value=" + value);
	}

	/**
	 * Wraps a {@link Windowed} key so its {@code toString()} renders the window
	 * bounds as human-readable timestamps instead of raw epoch millis.
	 */
	private static Windowed<String> getReadableWindowed(Windowed<String> key) {
		return new Windowed<String>(key.key(), key.window()) {
			// Bug fix: the original pattern used "hh" (12-hour clock, 01-12),
			// which mislabels afternoon windows; "HH" is the 0-23 hour field.
			// SimpleDateFormat is not thread-safe, but this instance is confined
			// to the anonymous wrapper object, which Streams uses per-record.
			private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");

			@Override
			public String toString() {
				String startTimeStr = sdf.format(new Date(window().start()));
				String endTimeStr = sdf.format(new Date(window().end()));
				return "[" + key() + "@" + startTimeStr + "/" + endTimeStr + "]";
			}
		};
	}
}
