package cn.itcast.flink.base;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.streaming.connectors.kafka.internals.KafkaSerializationSchemaWrapper;
import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner;
import org.apache.flink.util.Collector;
import org.apache.kafka.clients.producer.ProducerConfig;

import java.util.Properties;

/**
 * Author itcast
 * Date 2021/7/30 9:25
 * Reads a text stream from a socket, enables checkpointing, computes a running
 * word count, formats each result as "word:count", and writes it to Kafka.
 */
public class WordCountCheckpoint {

    /**
     * Entry point. Expects two required CLI flags, parsed via {@link ParameterTool}:
     * {@code --hostname <host>} and {@code --port <port>} — the socket source to read from.
     *
     * @param args CLI arguments in {@code --key value} form
     * @throws Exception if the job fails to submit or execute
     */
    public static void main(String[] args) throws Exception {
        // Create the streaming environment with parallelism 1 (single-task demo job).
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // Checkpoint every 1 second; store checkpoints on HDFS.
        env.enableCheckpointing(1000);
        env.setStateBackend(new FsStateBackend("hdfs://node1:8020/flink-checkpoints"));
        // Tolerate up to 5 consecutive checkpoint failures before failing the job.
        env.getCheckpointConfig().setTolerableCheckpointFailureNumber(5);
        // At most one checkpoint in flight at a time.
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
        // Abort any checkpoint that takes longer than 60 seconds.
        env.getCheckpointConfig().setCheckpointTimeout(60000L);
        // Leave at least 500 ms between the end of one checkpoint and the start of the next.
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);
        // RETAIN_ON_CANCELLATION keeps externalized checkpoints when the job is
        // cancelled (DELETE_ON_CANCELLATION would remove them), enabling manual recovery.
        env.getCheckpointConfig().enableExternalizedCheckpoints(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        // Parse named CLI parameters; getRequired() fails fast with a clear error
        // message if a flag is missing (plain get() would return null and fail
        // later with an opaque NPE inside the socket source).
        ParameterTool params = ParameterTool.fromArgs(args);
        String hostname = params.getRequired("hostname");
        int port = params.getInt("port");

        // Socket text source: one String per line.
        DataStreamSource<String> source = env.socketTextStream(hostname, port);

        // Split lines into words, count per word, and format each update as "word:count".
        SingleOutputStreamOperator<String> result = source
                .flatMap(new FlatMapFunction<String, Tuple2<String, Long>>() {
                    @Override
                    public void flatMap(String value, Collector<Tuple2<String, Long>> out) throws Exception {
                        String[] words = value.split(" ");
                        for (String word : words) {
                            out.collect(Tuple2.of(word, 1L));
                        }
                    }
                })
                .keyBy(t -> t.f0)
                .sum(1)
                .map(new MapFunction<Tuple2<String, Long>, String>() {
                    @Override
                    public String map(Tuple2<String, Long> value) throws Exception {
                        return value.f0 + ":" + value.f1;
                    }
                });

        // Kafka producer configuration.
        Properties props = new Properties();
        props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "node1:9092,node2:9092,node3:9092");

        // Sink to the "wordcount" topic with AT_LEAST_ONCE semantics.
        // Generic type arguments are explicit to avoid raw-type unchecked warnings;
        // writeTimestamp=false means no event timestamp is attached to records.
        FlinkKafkaProducer<String> producer = new FlinkKafkaProducer<>(
                "wordcount",
                new KafkaSerializationSchemaWrapper<>(
                        "wordcount",
                        new FlinkFixedPartitioner<>(),
                        false,
                        new SimpleStringSchema()
                ),
                props,
                FlinkKafkaProducer.Semantic.AT_LEAST_ONCE
        );
        result.addSink(producer);

        // Submit the job with an explicit name for the Flink web UI.
        env.execute("WordCountCheckpoint");
    }
}
