package com.corn.kafkastream.demo;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.*;
import org.apache.kafka.streams.state.KeyValueStore;

import java.util.Arrays;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.stream.Collector;
import java.util.stream.Collectors;

/**
 * Kafka Streams word-count demo: consumes lines from {@code stream.input},
 * lower-cases and splits them into words, counts occurrences per word in a
 * local state store, and writes {@code (word, count)} pairs to
 * {@code stream.output}.
 *
 * @author : Jim Wu
 * @version 1.0
 * @since : 2022/9/15 15:32
 */
@Slf4j
public class KafkaStreamsWordCount {

    private static final String BOOTSTRAP_SERVER = "node1:9092";

    private static final String INPUT_TOPIC = "stream.input";

    private static final String OUTPUT_TOPIC = "stream.output";

    private static final String APP_ID = "WORD_COUNT_APP_ID";

    public static void main(String[] args) throws InterruptedException {
        // 1. Streams configuration.
        Properties properties = new Properties();
        properties.put(StreamsConfig.APPLICATION_ID_CONFIG, APP_ID);
        properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, BOOTSTRAP_SERVER);
        // Default serdes, used wherever an operator does not specify one explicitly
        // (e.g. groupByKey below).
        properties.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        properties.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

        // 2. Topology: line -> lower-case -> words -> re-key by word -> count.
        StreamsBuilder builder = new StreamsBuilder();
        builder.stream(INPUT_TOPIC,
                        Consumed.with(Serdes.String(), Serdes.String())
                                .withName("input-stream")
                                .withOffsetResetPolicy(Topology.AutoOffsetReset.LATEST))
                .mapValues((ValueMapper<String, String>) String::toLowerCase)
                .flatMapValues(v -> Arrays.asList(v.split(" ")))
                // Re-key each record by the word itself so counting groups per word.
                .selectKey((k, v) -> v)
                .groupByKey()
                .count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("counts-store"))
                .toStream()
                // Counts are Long, so the output value serde must override the String default.
                .to(OUTPUT_TOPIC, Produced.with(Serdes.String(), Serdes.Long()));

        Topology topology = builder.build();
        log.info("{}", topology.describe());

        KafkaStreams streams = new KafkaStreams(topology, properties);
        CountDownLatch countDownLatch = new CountDownLatch(1);

        // Register the shutdown hook BEFORE blocking on the latch. The original code
        // registered it after await(), which blocks forever — the hook was never
        // installed, so streams.close() never ran on JVM shutdown.
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            streams.close();
            countDownLatch.countDown();
        }));

        streams.start();
        // Block the main thread until the shutdown hook releases the latch.
        countDownLatch.await();
    }

}
