package com.usian.kafka.stream;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.*;

import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Properties;

/**
 * Kafka Streams application that consumes records shaped like {@code "key:count"}
 * (e.g. {@code c1:1}) from {@link #INPUT_TOPIC}, sums the counts per key inside
 * 10-second tumbling windows, and writes the running window totals (as strings)
 * to {@link #OUT_TOPIC}.
 */
public class CountNumApplication {
    private static final String INPUT_TOPIC = "count-num-stream-topic";
    private static final String OUT_TOPIC = "count-num-stream-consumer";
    private static final String KAFKA_IP = "192.168.6.139:9092";

    /**
     * Builds and starts the streaming topology.
     *
     * @param args optional; {@code args[0]} overrides the default bootstrap
     *             servers ({@link #KAFKA_IP}) so the app can run against any cluster
     */
    public static void main(String[] args) {
        // Allow the broker address to be supplied on the command line; fall back
        // to the hard-coded default for backward compatibility.
        String bootstrapServers = args.length > 0 ? args[0] : KAFKA_IP;

        Properties props = new Properties();
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "count_num");
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

        // Create the topology builder and the source stream.
        StreamsBuilder builder = new StreamsBuilder();
        KStream<String, String> stream = builder.stream(INPUT_TOPIC);

        // Parse "key:count" records into (key, count) pairs.
        // Malformed records are skipped instead of crashing the stream thread —
        // a single bad message would otherwise kill the whole application with
        // an ArrayIndexOutOfBoundsException or NumberFormatException.
        stream.flatMap((KeyValueMapper<String, String, Iterable<KeyValue<String, Integer>>>) (key, value) -> {
            List<KeyValue<String, Integer>> pairs = new ArrayList<>(1);
            if (value != null) {
                String[] parts = value.split(":");
                if (parts.length == 2) {
                    try {
                        pairs.add(KeyValue.pair(parts[0], Integer.parseInt(parts[1].trim())));
                    } catch (NumberFormatException e) {
                        System.err.println("Skipping record with non-numeric count: " + value);
                    }
                } else {
                    System.err.println("Skipping malformed record: " + value);
                }
            }
            return pairs;
        })
        // Declare the key/value serdes for the repartition/grouping step.
        .groupByKey(Grouped.with(Serdes.String(), Serdes.Integer()))
        // 10-second tumbling windows: one aggregate per key per window.
        .windowedBy(TimeWindows.of(Duration.ofSeconds(10)))
        .aggregate(
                // Initial value of each window's running sum.
                () -> 0,
                // value: latest parsed count; aggregate: sum accumulated so far in this window.
                (key, value, aggregate) -> {
                    System.out.println(Instant.now() + "--key===" + key
                            + "----aggregate==" + aggregate + "------value==" + value);
                    return value + aggregate;
                },
                Materialized.with(Serdes.String(), Serdes.Integer()))
        // Convert the Integer totals back to strings for the output topic.
        .mapValues(String::valueOf)
        // After a windowed aggregation the key type is Windowed<String>; unwrap it
        // back to the plain String key — otherwise the default String key serde
        // fails with a ClassCastException when writing to the output topic.
        .toStream((windowedKey, value) -> windowedKey.key())
        .to(OUT_TOPIC);

        KafkaStreams streams = new KafkaStreams(builder.build(), props);
        // Close the streams client (flushing state stores) on JVM shutdown.
        Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
        streams.start();
    }
}
