package day03;

import day01.RichHdfsSourceFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.state.MapState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.StateTtlConfig;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.memory.MemoryStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.co.KeyedCoProcessFunction;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010;
import org.apache.flink.util.Collector;
import org.apache.kafka.clients.consumer.ConsumerConfig;

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.TimeUnit;

public class ConnectOprationMapState {
    /**
     * Demo: connect() an HDFS-sourced keyed stream with a Kafka keyed stream and
     * join them inside a {@code KeyedCoProcessFunction}, caching the HDFS side in a
     * TTL-enabled {@code MapState}. Also demonstrates checkpointing, an in-memory
     * state backend, and a fixed-delay restart strategy — a division by zero is
     * triggered on purpose when a Kafka record contains "CN" so state recovery
     * can be observed.
     *
     * NOTE(review): the class name has a typo ("Opration") but renaming would
     * break the public interface / filename, so it is kept as-is.
     */
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());
        // HDFS stream: lines of "key \t value"; values are later cached in MapState.
        // NOTE(review): uses RichHdfsSourceFunction2 (presumably same-package day03);
        // the day01.RichHdfsSourceFunction import at the top of the file looks unused.
        DataStreamSource<String> ds = env.addSource(new RichHdfsSourceFunction2("/user/wangy33/flink/cn.txt"));

        // Checkpoint configuration.
        env.enableCheckpointing(1000);
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);
        env.getCheckpointConfig().setCheckpointTimeout(60000);
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        env.getCheckpointConfig().setFailOnCheckpointingErrors(true);

        // State backend: checkpoints snapshot state here periodically (async by default).
        env.setStateBackend(new MemoryStateBackend(1024*1024*10,true));

        // Restart strategy: up to 3 restarts with no delay, so the deliberate
        // failure below exercises state recovery.
        env.setRestartStrategy(
                RestartStrategies.fixedDelayRestart(
                        3, // number of restart attempts
                        Time.of(0, TimeUnit.SECONDS) // delay
                )
        );

        //env.setParallelism(1);
        SingleOutputStreamOperator<Tuple2<String, String>> ds1 = ds.map(new MapFunction<String, Tuple2<String, String>>() {
            @Override
            public Tuple2<String, String> map(String s) throws Exception {
                // Split "key \t value" into a pair; assumes every line has >= 2 columns.
                String[] arr = s.split("\t");
                return Tuple2.of(arr[0], arr[1]);
            }
        });

        // Kafka source.
        Properties properties = new Properties();
        // If the first broker is unreachable, fall back to another one.
        // (The multi-broker value below was disabled; note it contains a
        // full-width comma and would need a plain ',' to be valid.)
       // properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,"s1.hadoop:9092，s3.hadoop:9092");
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,"s1.hadoop:9092");
        properties.put(ConsumerConfig.GROUP_ID_CONFIG,"group_33");//consumer group
        properties.setProperty("flink.partition-discovery.interval-millis", "5000");
        FlinkKafkaConsumer010<String> kafkaSource = new FlinkKafkaConsumer010<>("topic_33", new SimpleStringSchema(), properties);
        kafkaSource.setStartFromLatest();
        DataStreamSource<String> ds2 = env.addSource(kafkaSource);

        // connect:
        //   kafka --> String
        //   hdfs  --> Tuple2<String, String>
        // process() on a connected stream takes a CoProcessFunction
        // (first stream type, second stream type, output type) — a join-style operation.

        // With multiple slots, records are distributed across slots and each slot's
        // operator state (the map) is independent. keyBy partitions both streams so
        // that records sharing a key meet in the same slot.

        KeyedStream<Tuple2<String, String>, String> keyByhdfs = ds1.keyBy(new KeySelector<Tuple2<String, String>, String>() {
            @Override
            public String getKey(Tuple2<String, String> value) throws Exception {
                // Key by the first HDFS column.
                return value.f0;
            }
        });

        KeyedStream<String, String> keyBykafka = ds2.keyBy(new KeySelector<String, String>() {
            @Override
            public String getKey(String value) throws Exception {
                // The whole Kafka record is the key.
                return value;
            }
        });


        // hdfs input / kafka input --> joined output.
        // KeyedCoProcessFunction differs from CoProcessFunction only by exposing
        // ctx.getCurrentKey() (== tuple.f0 == value here).
        keyByhdfs.connect(keyBykafka).process(new KeyedCoProcessFunction<String, Tuple2<String, String>, String, String>() {
            // Per-key map state caching HDFS values; one MapState instance per key.
            MapState<String,String> mapState=null;

            // First start: initializes the state.
            // After a restart: restores the state from the last checkpoint.
            @Override
            public void open(Configuration parameters) throws Exception {
                MapStateDescriptor<String, String> mapDesc = new MapStateDescriptor<>("MapStateDescriptor", String.class, String.class);

                StateTtlConfig ttlConfig = StateTtlConfig
                        .newBuilder(Time.seconds(10))//time-to-live
                        .setUpdateType(StateTtlConfig.UpdateType.OnCreateAndWrite)//TTL timer is refreshed on create and on write
                        .setStateVisibility(StateTtlConfig.StateVisibility.ReturnExpiredIfNotCleanedUp)//expired values may still be returned until cleanup runs
                        .build();

                mapDesc.enableTimeToLive(ttlConfig);

                mapState=getRuntimeContext().getMapState(mapDesc);
            }// open() should only initialize or restore state — do not operate on it here.

            @Override
            public void processElement1(Tuple2<String, String> value, Context ctx, Collector<String> out) throws Exception {
                System.out.println("hdfs--->"+value);
                System.out.println(ctx.getCurrentKey());
                // Cache the HDFS value for later lookup by Kafka records.
                mapState.put(value.f0,value.f1);
            }

            @Override// emit one output per matching Kafka record
            public void processElement2(String value, Context ctx, Collector<String> out) throws Exception {
                String s = mapState.get(value);
                mapState.entries().forEach(t-> System.out.println(t.getKey()+"--->"+t.getValue()));
                // Deliberate failure to demonstrate the restart strategy and
                // state recovery from the last checkpoint.
                if(value.contains("CN")){
                    int i=1/0;
                }


                String s1 = s == null ? "unknown" : s;
                out.collect(s1);
            }
        }).print();


        env.execute();
        /* One MapState instance per key.
         * The HDFS data is cached into MapState; with ReturnExpiredIfNotCleanedUp,
         * typing "AD", waiting 10s, and typing it again still returns the expired
         * value once — after that cleanup has run and it is gone.
         */
    }
}
