package lowlevel.state;

import lowlevel.LineSplitProcesser;
import org.apache.kafka.common.serialization.LongSerializer;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.state.KeyValueStore;
import org.apache.kafka.streams.state.StoreBuilder;
import org.apache.kafka.streams.state.Stores;

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

/**
 * Word-count example built with the Kafka Streams low-level Processor API.
 *
 * <p>Reads lines from topic {@code t7}, splits them via {@link LineSplitProcesser},
 * and writes {@code (word, count)} pairs to topic {@code t8}, backed by a
 * persistent, change-logged key-value store named {@code "Counts"}.
 *
 * @Author:RenPu
 * @Date: 2020/3/27 13:15
 * @Version: 1.0
 * @description:
 */
public class WortCountWithProcessorAPI {

    public static void main(String[] args) {

        // 1. Build the Streams configuration.
        Properties properties = new Properties();

        // 2. Kafka cluster bootstrap address (fill in before running).
        properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG,"");

        // Default key serde. NOTE(review): the sink below serializes keys with
        // StringSerializer, so the default key serde is String (the original code
        // mistakenly wrote both puts to DEFAULT_WINDOWED_KEY_SERDE_INNER_CLASS,
        // with the second overwriting the first).
        properties.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());

        // Default value serde — incoming records are text lines.
        properties.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

        // Application id (also used as the consumer group id) — fill in before running.
        properties.put(StreamsConfig.APPLICATION_ID_CONFIG,"");

        // Number of stream processing threads.
        properties.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG,2);

        // Hand-built topology.
        Topology topology = new Topology();

        // Source node "s1": subscribes to input topic "t7".
        topology.addSource("s1","t7");

        // Processor node "p1": splits each line into words.
        // It must be added BEFORE addStateStore connects the store to it,
        // otherwise Topology throws a TopologyException for the unknown node.
        topology.addProcessor("p1", () -> new LineSplitProcesser(), "s1");

        //topology.addProcessor("p2",()->new WordCountProcesser(),"p1");

        // Changelog topic configs for the state store.
        Map<String, String> changeLong = new HashMap<>();

        // Minimum in-sync replicas for the changelog topic.
        // (Original code had the typo "min.insyc.replicas", which brokers ignore.)
        changeLong.put("min.insync.replicas","1");

        // Cleanup policy for the changelog topic:
        //  - "delete" (default): drop records past the retention period
        //  - "compact": keep only the latest value per key
        changeLong.put("cleanup.policy","compact");

        // Persistent key-value store "Counts" (word -> count) with changelog
        // replication enabled for fault tolerance / state recovery.
        StoreBuilder<KeyValueStore<String, Long>> storeBuilder = Stores.keyValueStoreBuilder(
                Stores.persistentKeyValueStore("Counts"),
                Serdes.String(),
                Serdes.Long()
        ).withLoggingEnabled(changeLong);

        // Attach the store to processor "p1".
        topology.addStateStore(storeBuilder,"p1");

        // Sink node "k1": writes results to topic "t8" as (String key, Long value).
        topology.addSink("k1","t8",new StringSerializer(),new LongSerializer(),"p1");

        // Create and start the streams application.
        KafkaStreams streams = new KafkaStreams(topology, properties);

        // Close cleanly on JVM shutdown so state is flushed and tasks released.
        Runtime.getRuntime().addShutdownHook(new Thread(streams::close));

        streams.start();
    }
}
