package com.shujia.flink.state;

import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.util.Collector;

import java.util.HashMap;
import java.util.Map;

/**
 * Word-count demo that deliberately keeps per-key counts in a plain Java
 * {@code Map} instead of Flink managed state, to illustrate why that is
 * unsafe: the map lives on the TaskManager's JVM heap, is NOT included in
 * checkpoints, and is lost whenever the task fails or restarts.
 *
 * @author shujia
 */
public class Demo1NoState {
    public static void main(String[] args) throws Exception {
        // Create the streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        /*
         * Enable checkpointing: periodically snapshot operator state to HDFS.
         * Note: the HashMap used below is NOT part of that snapshot.
         */
        // Checkpoint interval in milliseconds.
        env.enableCheckpointing(5000);

        // Where checkpoint data is stored.
        env.getCheckpointConfig().setCheckpointStorage("hdfs://master:9000/flink/checkpoint");

        // Retain checkpoint data when the job is cancelled manually.
        env.getCheckpointConfig().setExternalizedCheckpointCleanup(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);


        // Build the Kafka source.
        KafkaSource<String> kafkaSource = KafkaSource.<String>builder()
                // Kafka broker list.
                .setBootstrapServers("master:9092,node1:9092,node2:9092")
                // Topic to consume.
                .setTopics("lines")
                // Consumer group id.
                .setGroupId("Demo1NoState")
                // Start reading from the latest offsets.
                .setStartingOffsets(OffsetsInitializer.latest())
                // Treat each record value as a plain string.
                .setValueOnlyDeserializer(new SimpleStringSchema())
                .build();

        // Attach the Kafka source (no event-time watermarks needed here).
        DataStream<String> lines = env
                .fromSource(kafkaSource, WatermarkStrategy.noWatermarks(), "kafka source");

        // Split each comma-separated line into individual words.
        DataStream<String> words = lines.flatMap((line, out) -> {
            for (String word : line.split(",")) {
                out.collect(word);
            }
        }, Types.STRING);

        // Key the stream by the word itself.
        KeyedStream<String, String> keyBys = words.keyBy(word -> word);

        // Count occurrences per word.
        DataStream<WordCount> wordCounts = keyBys.process(new KeyedProcessFunction<String, String, WordCount>() {

            // An instance field would be shared by every key handled by this task:
            //Integer count = 0;

            /*
             * The problem with holding intermediate results in a plain Map:
             * the data lives on the TaskManager's JVM heap, so if the task
             * fails (taking the TaskManager down with it) all previously
             * computed counts are lost. Flink managed state would survive
             * via the checkpoints configured above.
             */
            private final Map<String, Integer> counts = new HashMap<>();

            /**
             * Invoked once per input record.
             *
             * @param word the current word (one element of the keyed stream)
             * @param ctx  runtime context giving access to Flink time attributes
             * @param out  collector used to emit results downstream
             */
            @Override
            public void processElement(String word,
                                       KeyedProcessFunction<String, String, WordCount>.Context ctx,
                                       Collector<WordCount> out) {
                // Increment the running count for this word (1 if absent).
                Integer count = counts.merge(word, 1, Integer::sum);

                // Emit the updated count downstream.
                out.collect(new WordCount(word, count));
            }
        });

        wordCounts.print();
        env.execute();

    }

    /**
     * Simple (word, count) result type.
     * Declared public with a no-arg constructor so Flink recognizes it as a
     * POJO instead of silently falling back to Kryo serialization.
     */
    @Data
    @NoArgsConstructor
    @AllArgsConstructor
    public static class WordCount {

        private String word;
        private Integer count;
    }
}
