package com.atguigu.chapter07.d_state;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.util.Collector;

import java.util.Properties;

/**
 * @ClassName: Flink09_Kafka_Flink_Kafka
 * @Description:
 * @Author: kele
 * @Date: 2021/4/9 14:36
 **/
public class Flink09_Kafka_Flink_Kafka {

    /**
     * End-to-end Kafka -> Flink -> Kafka word-count job with checkpointing
     * to HDFS. Reads lines from the source topic, splits on spaces, counts
     * per word with keyed state, and writes "word count" strings back to Kafka.
     */
    public static void main(String[] args) throws Exception {

        // Identity used for the HDFS state-backend writes below.
        System.setProperty("HADOOP_USER_NAME", "atguigu");

        Configuration conf = new Configuration();
        conf.setInteger("rest.port", 20000);

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);

        // Persist checkpoints to HDFS so the job can recover after restarts.
        env.setStateBackend(new FsStateBackend("hdfs://hadoop162:8020/flink/fs"));

        // Abort a checkpoint if it takes longer than 60s (Flink's default is 10 minutes).
        env.getCheckpointConfig().setCheckpointTimeout(60000);

        // Allow only one in-flight checkpoint at a time.
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);

        // Require at least 500ms between the end of one checkpoint and the start of the next.
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);

        // Retain externalized checkpoints after job cancellation so state survives.
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        // Exactly-once checkpointing semantics within the Flink pipeline.
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);

        // Trigger a checkpoint every 2 seconds.
        env.enableCheckpointing(2000);

        env.setParallelism(2);

        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "hadoop162:9092,hadoop163:9092,hadoop164:9092");
        props.setProperty("group.id", "wc");
        props.setProperty("auto.offset.reset", "latest");

        // NOTE(review): topic "senion" looks like a typo for "sensor" — confirm against the producer side.
        env.addSource(new FlinkKafkaConsumer<String>("senion", new SimpleStringSchema(), props))
                .flatMap(new FlatMapFunction<String, Tuple2<String, Long>>() {
                    @Override
                    public void flatMap(String value, Collector<Tuple2<String, Long>> out) throws Exception {
                        // Emit (word, 1) for every space-separated token in the line.
                        for (String word : value.split(" ")) {
                            out.collect(Tuple2.of(word, 1L));
                        }
                    }
                })
                .keyBy(d -> d.f0)
                .sum(1)
                .map(d -> d.f0 + " " + d.f1)
                // NOTE(review): this 3-arg producer constructor is AT_LEAST_ONCE only; for true
                // end-to-end exactly-once, use the constructor taking Semantic.EXACTLY_ONCE
                // (requires transaction timeout configuration on the broker/producer).
                .addSink(new FlinkKafkaProducer<String>("hadoop162:9092", "wc", new SimpleStringSchema()));

        // Propagate failures instead of swallowing them, so the process exits non-zero on error.
        env.execute();
    }
}
