package com.inspur.springkafka.stream2;

import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.state.KeyValueStore;
import org.apache.kafka.streams.state.StoreBuilder;
import org.apache.kafka.streams.state.Stores;

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

/**
 * Demo: a word-count application built with the Kafka Streams low-level
 * Processor API. Reads String messages from topic "source", counts words in a
 * persistent state store via {@code WordCountProcessor}, and writes
 * (word, count) pairs to topic "sink".
 */
public class WordCountTopologyDemo {

    public static void main(String[] args) {
        // 0. Configure the KafkaStreams client.
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "word-count-lowlevel");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        // Default key/value serializers and deserializers (records are Strings on input).
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

        // 1. Define the processing topology.
        Topology topology = new Topology();

        // State store holding the running counts; backed by a changelog topic.
        String storeName = "wdcount";
        // Was a raw HashMap — use the diamond operator for type safety.
        Map<String, String> changelogConfig = new HashMap<>();
        // Minimum number of in-sync replicas for the changelog topic.
        changelogConfig.put("min.insync.replicas", "1");
        // Cleanup policy: compact (the default is delete) so the latest count per key survives.
        changelogConfig.put("cleanup.policy", "compact");
        // Minimum time a message stays uncompacted in the log; applies only to compacted logs.
        changelogConfig.put("log.cleaner.min.compaction.lag.ms", "1000");

        StoreBuilder<KeyValueStore<String, Integer>> countStore = Stores.keyValueStoreBuilder(
                        Stores.persistentKeyValueStore(storeName),
                        Serdes.String(),
                        Serdes.Integer())
                .withLoggingEnabled(changelogConfig);

        // 2. Wire the topology: topic "source" -> WordCountProcessor (using the
        //    state store) -> topic "sink" with explicit String/Integer serializers.
        topology.addSource("s1", "source")
                .addProcessor("p1", () -> new WordCountProcessor(storeName), "s1")
                .addStateStore(countStore, "p1")
                .addSink("sk1", "sink",
                        new StringSerializer(),
                        new IntegerSerializer(), "p1");

        // 3. Create the KafkaStreams instance.
        KafkaStreams kafkaStreams = new KafkaStreams(topology, props);
        // Close the client cleanly on JVM shutdown so state stores are flushed
        // and offsets committed (the original never closed the streams client).
        Runtime.getRuntime().addShutdownHook(
                new Thread(kafkaStreams::close, "streams-shutdown-hook"));
        // 4. Start processing.
        kafkaStreams.start();
    }
}
