package com.zhanghe.study;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.*;
import org.apache.kafka.streams.state.KeyValueStore;

import java.util.Arrays;
import java.util.Properties;

public class TestCount {

    /**
     * Builds and starts a Kafka Streams topology that counts occurrences of
     * each record value read from {@code testCountInputTopic} and writes the
     * running counts to {@code testCountOutputTopic}.
     */
    public static void main(String[] args) {
        Properties prop = init();
        StreamsBuilder builder = new StreamsBuilder();
        KStream<String, String> stream = builder.stream("testCountInputTopic");
        // Print every incoming record to stdout for debugging.
        stream.print(Printed.toSysOut());
        KTable<String, Long> table = stream
                // Re-key each record by its value so identical values are grouped together.
                .groupBy((key, value) -> value)
                // Name the state store (stored under /tmp/kafka-streams by default)
                // so the counts survive a restart instead of starting from scratch.
                .count(Materialized.as("counts-store"));
        // Explicit serdes are required here: the count value is a Long, which
        // differs from the String default configured in init().
        table.toStream().to("testCountOutputTopic", Produced.with(Serdes.String(), Serdes.Long()));
        KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), prop);
        // Close the streams client on JVM shutdown so state stores are flushed
        // and the consumer group is left cleanly (previously never closed).
        Runtime.getRuntime().addShutdownHook(new Thread(kafkaStreams::close, "streams-shutdown-hook"));
        kafkaStreams.start();
    }

    /**
     * Creates the Kafka Streams configuration.
     *
     * @return properties holding the application id, bootstrap servers and
     *         default key/value serdes
     */
    public static Properties init() {
        Properties properties = new Properties();
        // Application id is mandatory; it also serves as the consumer group id
        // and as the prefix for internal topic and state-store names.
        properties.put(StreamsConfig.APPLICATION_ID_CONFIG, "KStream-test");
        properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        // Default key serializer/deserializer.
        properties.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        // Default value serializer/deserializer.
        properties.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        return properties;
    }
}
