package com.lagou.bak;

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.functions.RichFlatMapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.fs.Path;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.util.Collector;

import java.util.Properties;

/**
 * Flink streaming job that consumes epidemic ("YiQing") patient records from Kafka,
 * keys them by patient/region id (field 0), and maintains a running (count, sum)
 * per key in keyed {@link ValueState}, with exactly-once checkpointing to HDFS.
 *
 * <p>Expected input format: CSV lines {@code "<longKey>,<longValue>,<string>"}.
 * Malformed lines are logged and replaced by a zeroed tuple rather than failing the job.
 */
public class YiQing {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        configureCheckpointing(env);

        DataStreamSource<String> data = env.addSource(createKafkaConsumer());

        // Parse each CSV line into (key, value, label). Parse errors are swallowed
        // deliberately so a single bad record cannot take down the stream.
        SingleOutputStreamOperator<Tuple3<Long, Long, String>> mapped =
                data.map(new MapFunction<String, Tuple3<Long, Long, String>>() {
                    @Override
                    public Tuple3<Long, Long, String> map(String value) throws Exception {
                        System.out.println(value);

                        // Default emitted on malformed input (missing fields / non-numeric values).
                        Tuple3<Long, Long, String> t = new Tuple3<>(0L, 0L, "");
                        String[] split = value.split(",");
                        try {
                            t = new Tuple3<>(Long.valueOf(split[0]), Long.valueOf(split[1]), split[2]);
                        } catch (Exception e) {
                            // Best-effort parsing: log and fall through to the zeroed default.
                            e.printStackTrace();
                        }
                        return t;
                    }
                });

        // Partition by the first tuple field so state below is scoped per key.
        KeyedStream<Tuple3<Long, Long, String>, Long> keyed = mapped.keyBy(value -> value.f0);

        // Stateful per-key aggregation: f0 accumulates a record count, f1 accumulates
        // the sum of incoming f1 values.
        SingleOutputStreamOperator<Tuple3<Long, Long, String>> flatMapped =
                keyed.flatMap(new RichFlatMapFunction<Tuple3<Long, Long, String>, Tuple3<Long, Long, String>>() {
                    // Per-key running aggregate; initialized lazily from the descriptor default.
                    ValueState<Tuple3<Long, Long, String>> sumState;

                    @Override
                    public void open(Configuration parameters) throws Exception {
                        // Create the state handle once per operator instance.
                        ValueStateDescriptor<Tuple3<Long, Long, String>> descriptor = new ValueStateDescriptor<>(
                                "average",
                                TypeInformation.of(new TypeHint<Tuple3<Long, Long, String>>() {
                                }),
                                Tuple3.of(0L, 0L, ""));
                        sumState = getRuntimeContext().getState(descriptor);
                    }

                    @Override
                    public void flatMap(Tuple3<Long, Long, String> value,
                                        Collector<Tuple3<Long, Long, String>> out) throws Exception {
                        Tuple3<Long, Long, String> currentSum = sumState.value();

                        // NOTE(review): f0 is overwritten with a running count, so the emitted
                        // tuple no longer carries the key, and f2 is never populated from the
                        // input (stays the descriptor default "") — confirm this is intended.
                        currentSum.f0 += 1;
                        currentSum.f1 += value.f1;

                        sumState.update(currentSum);
                        out.collect(currentSum);
                    }
                });

        flatMapped.print();

        env.execute();
    }

    /**
     * Enables exactly-once checkpointing and points the state backend at HDFS.
     * Checkpoints are retained on cancellation so the job can be restored manually.
     */
    private static void configureCheckpointing(StreamExecutionEnvironment env) {
        env.enableCheckpointing(5000);

        CheckpointConfig config = env.getCheckpointConfig();
        // At least 500 ms between the end of one checkpoint and the start of the next.
        config.setMinPauseBetweenCheckpoints(500);
        // A checkpoint must complete within one minute or it is discarded.
        config.setCheckpointTimeout(60000);
        // Only one checkpoint may be in flight at a time.
        config.setMaxConcurrentCheckpoints(1);
        config.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // Overrides the 5000 ms interval set by enableCheckpointing above.
        config.setCheckpointInterval(1000);
        System.out.println("......after:" + config.getCheckpointInterval());
        // Keep checkpoint data on cancellation/failure so the job can resume from it.
        config.enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        env.setStateBackend(new FsStateBackend(new Path("hdfs://hdp-1:9000/flink/checkpoints")));
    }

    /**
     * Builds the Kafka source for topic {@code huanzhe}, starting from the committed
     * group offsets and committing offsets back on successful checkpoints.
     */
    private static FlinkKafkaConsumer<String> createKafkaConsumer() {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "hdp-2:9092");
        props.setProperty("group.id", "mygp");

        FlinkKafkaConsumer<String> consumer =
                new FlinkKafkaConsumer<>("huanzhe", new SimpleStringSchema(), props);
        consumer.setStartFromGroupOffsets();
        consumer.setCommitOffsetsOnCheckpoints(true);
        return consumer;
    }
}
