package demo.kafka.stream.toplogy;

import java.util.Arrays;
import java.util.Locale;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.*;

// NOTE(review): package is spelled "toplogy" (missing an 'o') — probably a typo
// for "topology"; left unchanged because renaming the package breaks callers.
public class WordCountTopology {

    /** Default topic the topology consumes raw text lines from. */
    public static final String INPUT_TOPIC = "test.stan";

    /** Default topic the word counts are published to. */
    public static final String OUTPUT_TOPIC = "streams-wordcount-output";

    /** Name of the state store backing the count, usable for interactive queries. */
    public static final String COUNTS_STORE = "counts-store";

    // Utility class: all members are static, so prevent instantiation.
    private WordCountTopology() {
    }

    /**
     * Builds the word-count topology against the default topics
     * ({@link #INPUT_TOPIC} / {@link #OUTPUT_TOPIC}).
     *
     * @return the built {@link Topology}, ready to pass to {@code KafkaStreams}
     */
    public static Topology build() {
        return build(INPUT_TOPIC, OUTPUT_TOPIC);
    }

    /**
     * Builds a topology that reads text lines from {@code inputTopic}, counts
     * word occurrences, and writes {@code (word, count)} records with
     * String/Long serdes to {@code outputTopic}.
     *
     * @param inputTopic  topic to consume String-keyed/String-valued lines from
     * @param outputTopic topic to publish String-keyed/Long-valued counts to
     * @return the built {@link Topology}, ready to pass to {@code KafkaStreams}
     */
    public static Topology build(final String inputTopic, final String outputTopic) {
        final StreamsBuilder builder = new StreamsBuilder();

        // 1. Source: read each record from the input topic as a (key, line) pair.
        final KStream<String, String> textLines = builder.stream(inputTopic,
                Consumed.with(Serdes.String(), Serdes.String()));

        // 2. Processing steps.
        final KTable<String, Long> wordCounts = textLines
                // Split each line into words. Locale.ROOT keeps lower-casing
                // deterministic regardless of the JVM's default locale
                // (e.g. avoids the Turkish dotless-i surprise).
                .flatMapValues(line -> Arrays.asList(line.toLowerCase(Locale.ROOT).split("\\W+")))
                // split("\\W+") yields a leading "" when a line starts with a
                // non-word character — drop empties so "" is never counted.
                .filter((key, word) -> !word.isEmpty())
                // Re-key each record by the word itself so grouping is per word.
                .selectKey((key, word) -> word)
                // Group by the new key (the word) with explicit String serdes.
                .groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
                // Count occurrences per word into a named state store so the
                // result can be interactively queried.
                .count(Materialized.as(COUNTS_STORE));

        // 3. Sink: emit the KTable's changelog as (word, count) records.
        wordCounts.toStream()
                .to(outputTopic, Produced.with(Serdes.String(), Serdes.Long()));

        return builder.build();
    }
}
