package com.shujia.flink.state;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

/**
 * Streaming word count over a Kafka topic.
 *
 * <p>Reads comma-separated lines from the {@code in} topic, splits them into
 * words, and continuously prints a running count per word.
 */
public class Demo04KafkaWordCount {
    public static void main(String[] args) throws Exception {
        // 1. Set up the streaming execution environment.
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // 2. Configure the Kafka source: broker list, topic, consumer group,
        //    starting offsets, and how each record's value is deserialized.
        //    earliest = start from the oldest record; latest = only new records.
        //    Once offsets have been checkpointed, consumption resumes from the
        //    committed offset even when "earliest" is configured.
        final KafkaSource<String> source =
                KafkaSource.<String>builder()
                        .setBootstrapServers("master:9092,node1:9092,node2:9092")
                        .setTopics("in")
                        .setGroupId("my-group-0")
                        .setStartingOffsets(OffsetsInitializer.earliest())
                        .setValueOnlyDeserializer(new SimpleStringSchema())
                        .build();

        // By default the Kafka source's parallelism equals the topic's partition count.
        final DataStreamSource<String> lines =
                env.fromSource(source, WatermarkStrategy.noWatermarks(), "kafkaSource");

        // 3. Split each line on commas, emitting one word per output record.
        //    Java lambdas lose generic type information to erasure, so the
        //    output type must be supplied explicitly via Types.STRING.
        final DataStream<String> words = lines.flatMap((line, out) -> {
            final String[] tokens = line.split(",");
            for (int i = 0; i < tokens.length; i++) {
                // Forward each word downstream.
                out.collect(tokens[i]);
            }
        }, Types.STRING);

        // 4. Pair every word with an initial count of 1.
        //    The tuple type is again declared explicitly for the same reason.
        final DataStream<Tuple2<String, Integer>> pairs =
                words.map(w -> Tuple2.of(w, 1), Types.TUPLE(Types.STRING, Types.INT));

        // 5. Partition the stream by the word (tuple field 0).
        final KeyedStream<Tuple2<String, Integer>, String> keyed = pairs.keyBy(pair -> pair.f0);

        // 6. Sum the counts (tuple field 1) per key, producing running totals.
        final DataStream<Tuple2<String, Integer>> counts = keyed.sum(1);

        counts.print();

        // Submit and run the job.
        env.execute();
    }
}
