package com.atguigu.flink0624.chapter07.state;

import com.atguigu.flink0624.bean.WaterSensor;
import com.atguigu.flink0624.util.AtguiguUtil;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.state.MapState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.util.Collector;

import java.time.Duration;
import java.util.List;

/**
 * @Author lizhenchao@atguigu.cn
 * @Date 2021/11/15 15:25
 */
public class Flink09_StateBackend {
    
    /**
     * Demo of configuring Flink state backends (memory / fs / RocksDB).
     * Reads "id,ts,vc" lines from a socket, keys by sensor id, and prints
     * the set of distinct water levels (vc) seen so far per sensor.
     */
    public static void main(String[] args) throws Exception {
        // Checkpoints are written to HDFS, so impersonate the "atguigu" user.
        System.setProperty("HADOOP_USER_NAME", "atguigu");
        Configuration conf = new Configuration();
        conf.setInteger("rest.port", 2000); // local web UI port
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        env.setParallelism(1);
        
        env.enableCheckpointing(3000); // checkpoint every 3 seconds
        // 1. In-memory backend
        //  legacy API:
//        env.setStateBackend(new MemoryStateBackend());  // the default
        // new API (1.13+): heap state + checkpoints kept in the JobManager
//        env.setStateBackend(new HashMapStateBackend());
//        env.getCheckpointConfig().setCheckpointStorage(new JobManagerCheckpointStorage());
        
        // 2. Filesystem backend
        // legacy API:
//        env.setStateBackend(new FsStateBackend("hdfs://hadoop162:8020/ck"));
        // new API: heap state + durable checkpoint storage on HDFS
//        env.setStateBackend(new HashMapStateBackend());
//        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop162:8020/ck1");
        // 3. RocksDB backend
        // legacy API:
//        env.setStateBackend(new RocksDBStateBackend("hdfs://hadoop162:8020/ck2"));
        
        // new API: state lives in embedded RocksDB, checkpoints go to HDFS
        env.setStateBackend(new EmbeddedRocksDBStateBackend());
        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop162:8020/ck3");
        
        env
            .socketTextStream("hadoop162", 9999)
            // each line is "id,ts,vc"
            .map(line -> {
                String[] data = line.split(",");
                return new WaterSensor(data[0], Long.valueOf(data[1]), Integer.valueOf(data[2]));
            })
            .assignTimestampsAndWatermarks(
                WatermarkStrategy
                    .<WaterSensor>forBoundedOutOfOrderness(Duration.ofSeconds(3))
                    // the event time of each record is its ts field
                    .withTimestampAssigner((element, ts) -> element.getTs())
            )
            .keyBy(WaterSensor::getId)
            .process(new DistinctVcFunction())
            .print();
        
        env.execute();
        
    }
    
    /**
     * Emits, for the current key, the list of distinct vc values observed so far.
     * The MapState's key set acts as a per-key set of vc values.
     */
    private static class DistinctVcFunction extends KeyedProcessFunction<String, WaterSensor, String> {
        
        // Boolean value type instead of Object: Object.class would make Flink
        // fall back to generic Kryo serialization, and the value is never read
        // anyway (the map is used purely as a set of keys).
        private MapState<Integer, Boolean> vcMapState;
        
        @Override
        public void open(Configuration parameters) throws Exception {
            vcMapState = getRuntimeContext()
                .getMapState(new MapStateDescriptor<>(
                    "vcMapState",
                    Integer.class,
                    Boolean.class
                ));
        }
        
        @Override
        public void processElement(WaterSensor value,
                                   Context ctx,
                                   Collector<String> out) throws Exception {
            // Inserting the vc as a map key deduplicates automatically.
            vcMapState.put(value.getVc(), Boolean.TRUE);
            
            Iterable<Integer> it = vcMapState.keys();
            List<Integer> vcs = AtguiguUtil.toList(it);
            out.collect(ctx.getCurrentKey() + "  " + vcs);
        }
    }
    
}
