package com.atguigu.flink.chapter07.state;

import com.atguigu.flink.bean.WaterSensor;
import com.atguigu.flink.util.KKutil;
import org.apache.flink.api.common.state.MapState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.runtime.state.memory.MemoryStateBackend;
import org.apache.flink.runtime.state.storage.JobManagerCheckpointStorage;

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.util.Collector;

import java.util.List;


/**
 * Demonstrates configuring Flink state backends (tutorial: memory, filesystem,
 * RocksDB), using a keyed {@code MapState} as a per-key set to collect the
 * distinct water-level values ({@code vc}) seen for each sensor id.
 *
 * <p>Reads lines of the form {@code "id,ts,vc"} from a socket, keys by sensor
 * id, and prints {@code "<id> [distinct vcs]"} for every incoming record.
 */
public class Flink10_State_Backend {

    /** Shared placeholder map value: the MapState is used as a set, so values carry no information. */
    private static final Object PRESENT = new Object();

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.setInteger("rest.port", 20000);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        env.setParallelism(1);
        // Set the user name used to authenticate against HDFS.
        System.setProperty("HADOOP_USER_NAME", "atguigu");

        // Checkpointing must be enabled for state to be persisted to remote storage.
        env.enableCheckpointing(3000);

        // Option 1: in-memory state backend
        // legacy API:
        //   env.setStateBackend(new MemoryStateBackend()); // default configuration
        // current API:
        //   env.setStateBackend(new HashMapStateBackend());
        //   env.getCheckpointConfig().setCheckpointStorage(new JobManagerCheckpointStorage());

        // Option 2: filesystem state backend
        // legacy API:
        //   env.setStateBackend(new FsStateBackend("hdfs://hadoop162:8020/ck"));
        // current API:
        //   env.setStateBackend(new HashMapStateBackend());
        //   env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop162:8020/ck11");

        // Option 3: RocksDB state backend (used here)
        // legacy API:
        //   env.setStateBackend(new RocksDBStateBackend());
        // current API:
        env.setStateBackend(new EmbeddedRocksDBStateBackend());
        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop162:8020/ck3");

        env
                .socketTextStream("hadoop162", 9999)
                // Parse "id,ts,vc" lines into WaterSensor beans.
                .map(line -> {
                    String[] fields = line.split(",");
                    return new WaterSensor(
                            fields[0],
                            Long.parseLong(fields[1]),
                            Integer.parseInt(fields[2]));
                })
                .keyBy(WaterSensor::getId)
                .process(new KeyedProcessFunction<String, WaterSensor, String>() {

                    // Keyed MapState used as a set: keys are the distinct vc values
                    // observed for the current sensor id; map values are ignored.
                    private MapState<Integer, Object> vcState;

                    @Override
                    public void open(Configuration parameters) throws Exception {
                        vcState = getRuntimeContext().getMapState(new MapStateDescriptor<Integer, Object>(
                                "vcState", Integer.class, Object.class
                        ));
                    }

                    @Override
                    public void processElement(WaterSensor value,
                                               Context ctx,
                                               Collector<String> out) throws Exception {
                        // Record this vc for the current key; duplicates overwrite in place.
                        vcState.put(value.getVc(), PRESENT);
                        List<Integer> distinctVcs = KKutil.toList(vcState.keys());
                        out.collect(ctx.getCurrentKey() + " " + distinctVcs);
                    }
                })
                .print();

        env.execute();
    }
}
