package com.atguigu.chapter07.d_state;

import com.atguigu.chapter5.source.WaterSensor;
import org.apache.flink.api.common.state.MapState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.util.Collector;

import java.io.IOException;

/**
 * @ClassName: Flink08_Key_State_Backend
 * @Description: Demonstrates configuring Flink state backends
 * @Author: kele
 * @Date: 2021/4/7 19:38
 *
 * State backend test.
 *      There are three kinds of state backends:
 *          MemoryStateBackend (the default)
 *          FsStateBackend
 *          RocksDBStateBackend (requires an extra dependency)
 *
 **/
public class Flink08_Key_State_Backend {

    /**
     * Entry point: reads "id,ts,vc" lines from a socket, keys by sensor id,
     * and uses keyed {@link MapState} to collect the distinct water levels
     * seen per key. State is checkpointed to HDFS via RocksDB.
     *
     * @param args unused
     * @throws Exception if the state backend cannot be created or the job fails;
     *                   propagated so a failed job fails the process instead of
     *                   being silently printed and ignored
     */
    public static void main(String[] args) throws Exception {

        // HDFS write access for the checkpoint directory requires this user.
        System.setProperty("HADOOP_USER_NAME", "atguigu");

        Configuration conf = new Configuration();
        conf.setInteger("rest.port", 20000);

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);

        env.setParallelism(2);

        /**** Set the state backend to FsStateBackend ****/
        //env.setStateBackend(new FsStateBackend("hdfs://hadoop162:8020/flink/checkpoint/fs"));

        /***** Set the state backend to RocksDB; submitting to a cluster requires adding the RocksDB dependency *****/
        env.setStateBackend(new RocksDBStateBackend("hdfs://hadoop162:8020/flink/checkpoint/rocks"));

        // Enable checkpointing (interval in ms) so state is actually persisted to the backend.
        env.enableCheckpointing(5000);
        env.getCheckpointConfig().setCheckpointTimeout(10000);


        env.socketTextStream("hadoop162", 8888)
                .map(line -> {
                    // Expected input format: "id,timestampSeconds,vc"
                    String[] fields = line.split(",");
                    return new WaterSensor(fields[0],
                            Long.parseLong(fields[1]) * 1000,   // seconds -> milliseconds
                            Integer.parseInt(fields[2]));
                })
                .keyBy(WaterSensor::getId)
                .process(new KeyedProcessFunction<String, WaterSensor, String>() {

                    // Per-key state; only the map keys are used (deduplicated vc values).
                    private MapState<Integer, String> mapState;

                    @Override
                    public void open(Configuration parameters) throws Exception {
                        // For deduplication only the map's keys matter; values stay empty.
                        mapState = getRuntimeContext()
                                .getMapState(new MapStateDescriptor<Integer, String>("MapState", Integer.class, String.class));
                    }

                    @Override
                    public void processElement(WaterSensor value, Context ctx, Collector<String> out) throws Exception {

                        mapState.put(value.getVc(), "");

                        // ctx.getCurrentKey() is the key produced by keyBy (the sensor id);
                        // mapState.keys() holds the distinct water levels stored for this key.
                        out.collect(ctx.getCurrentKey() + " " + mapState.keys().toString());

                    }
                })
                .print();

        // Propagate failures instead of swallowing them with printStackTrace().
        env.execute();

    }

}
