package com.atgugu.flink.chapter07.state;

import com.atgugu.flink.bean.WaterSensor;
import com.atgugu.flink.util.AtguiguUtil;
import org.apache.flink.api.common.state.MapState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.util.Collector;

import java.util.List;

/**
 * @Author lzc
 * @Date 2022/4/6 10:02
 */
/**
 * Demonstrates how to configure a Flink job's state backend in code.
 * The legacy and new (Flink 1.13+) APIs for the three backend families are
 * listed as comments; only the RocksDB variant is actually enabled.
 */
public class Flink09_StateBackend {
    public static void main(String[] args) throws Exception {
        // HDFS checkpoint paths below are accessed as this user.
        System.setProperty("HADOOP_USER_NAME", "atguigu");

        Configuration config = new Configuration();
        config.setInteger("rest.port", 10000);
        StreamExecutionEnvironment environment = StreamExecutionEnvironment.getExecutionEnvironment(config);
        environment.setParallelism(1);

        // Enable checkpointing (every 3s): state gets persisted to remote storage.
        environment.enableCheckpointing(3000);

        // --- State backend choices (only option 3, RocksDB, is active) ---
        // 1. Memory
        //    legacy: env.setStateBackend(new MemoryStateBackend());                  // default: local + remote both in memory
        //    new:    env.setStateBackend(new HashMapStateBackend());                 // local state in heap memory
        //            env.getCheckpointConfig().setCheckpointStorage(new JobManagerCheckpointStorage()); // checkpoints in JobManager memory
        // 2. Filesystem
        //    legacy: env.setStateBackend(new FsStateBackend("hdfs://hadoop162:8020/ck/fs")); // local in memory, checkpoints on HDFS
        //    new:    env.setStateBackend(new HashMapStateBackend());                 // local state in heap memory
        //            env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop162:8020/ck1"); // checkpoints on HDFS
        // 3. RocksDB
        //    legacy: env.setStateBackend(new RocksDBStateBackend("hdfs://hadoop162:8020/ck2"));
        //    new (enabled below): local state in embedded RocksDB, checkpoints on HDFS
        environment.setStateBackend(new EmbeddedRocksDBStateBackend());
        environment.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop162:8020/ck3");

        environment
            .socketTextStream("hadoop162", 9999)
            // Parse "id,ts,vc" lines into WaterSensor records.
            .map(line -> {
                String[] fields = line.split(",");
                return new WaterSensor(fields[0], Long.valueOf(fields[1]), Integer.valueOf(fields[2]));
            })
            .keyBy(WaterSensor::getId)
            .process(new KeyedProcessFunction<String, WaterSensor, String>() {

                // Per-key map state used as a set: keys are the distinct water
                // levels seen for this sensor id; values carry no information.
                private MapState<Integer, Object> mapState;

                @Override
                public void open(Configuration parameters) throws Exception {
                    mapState = getRuntimeContext().getMapState(
                        new MapStateDescriptor<Integer, Object>("mapState", Integer.class, Object.class));
                }

                @Override
                public void processElement(WaterSensor value,
                                           Context ctx,
                                           Collector<String> out) throws Exception {
                    // Record this water level; a duplicate key simply overwrites.
                    mapState.put(value.getVc(), new Object());

                    List<Integer> distinctLevels = AtguiguUtil.toList(mapState.keys());

                    out.collect(ctx.getCurrentKey() + " 所有不重复的水位: " + distinctLevels);
                }
            })
            .print();

        environment.execute();
    }

}
/*
Two ways to configure the state backend:

1. Set a cluster-wide default for all jobs in Flink's configuration file (flink-conf.yaml),
   e.g. via the `state.backend` and `state.checkpoints.dir` options.

2. Override it for a single job in code, as shown above.
 */