package com.heima.kafka.streams;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.*;

import java.util.Arrays;
import java.util.Properties;

/**
 * @BelongsProject: heima-leadnews
 * @BelongsPackage: com.heima.kafka.streams
 * @Author: wangjian
 * @CreateTime: 2022-07-08  17:12
 * @Description: Kafka Streams word-count demo: reads lines from "TextLinesTopic",
 *               counts word occurrences, and writes "word:count" pairs to
 *               "WordsWithCountsTopic".
 * @Version: 1.0
 */

public class WordCountApplication {

    public static void main(final String[] args) throws Exception {
        Properties props = new Properties();
        // Unique id for this streams application (also used as the consumer group id).
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "wordcount-application");
        // Kafka broker address.
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.85.143:9092");
        // Default key serde (String).
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        // Default value serde (String).
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

        StreamsBuilder builder = new StreamsBuilder();
        // Read records from the source topic.
        // Example record: key "10010", value "tom jerry jack tom tom jack"
        KStream<String, String> textLines = builder.stream("TextLinesTopic");

        KTable<String, Long> wordCounts = textLines
                // Split each line into words:
                // (k, "tom jerry ...") -> (k, "tom"), (k, "jerry"), ...
                .flatMapValues(line -> Arrays.asList(line.split(" ")))
                // Re-key each record by the word itself so identical words group together.
                .groupBy((key, word) -> word)
                // Count occurrences per word. Explicit String/Long serdes are required for
                // the state store because the count is a Long while the default value
                // serde configured above is String.
                .count(Materialized.with(Serdes.String(), Serdes.Long()));
        // wordCounts for the example record: tom -> 3, jerry -> 1, jack -> 2

        // Convert the changelog back to a stream and the Long count to a String so the
        // sink topic can be written with the default String serdes.
        wordCounts.toStream()
                .map((word, count) -> new KeyValue<>(word, count.toString()))
                .to("WordsWithCountsTopic");

        KafkaStreams streams = new KafkaStreams(builder.build(), props);
        // Close the topology cleanly on JVM exit (flushes state stores, commits offsets);
        // the original never closed the streams instance.
        Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
        streams.start();
    }
}
