package day04;

import day03.RichHdfsSourceFunction2;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.state.BroadcastState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.ReadOnlyBroadcastState;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.memory.MemoryStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010;
import org.apache.flink.util.Collector;
import org.apache.kafka.clients.consumer.ConsumerConfig;

import java.util.Map;
import java.util.Properties;
import java.util.concurrent.TimeUnit;

/**
 * Broadcast-state demo:
 * <ul>
 *   <li>HDFS stream supplies country reference data (tab-separated key/value) — broadcast side.</li>
 *   <li>Kafka stream supplies country abbreviations, looked up against the broadcast state.</li>
 * </ul>
 *
 * Kafka records that arrive before the broadcast state has been populated will not match;
 * a keyBy-based join with buffering could be used to handle such early records.
 */
public class TestBroadCast {

    /**
     * Descriptor for the broadcast map state (country key -> country value),
     * populated from the HDFS stream and read while processing Kafka records.
     */
    private static final MapStateDescriptor<String, String> mapDesc =
            new MapStateDescriptor<String, String>("hdfsMapState", String.class, String.class);

    public static void main(String[] args) throws Exception {
        /*
         * 1. Create the execution environment.
         * 2. Configure the restart strategy.
         * 3. Enable checkpointing.
         * 4. Configure checkpoint retention and the state backend.
         * 5. Build the pipeline.
         */
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());

        // Restart strategy: up to 3 attempts with no delay between them.
        env.setRestartStrategy(
                RestartStrategies.fixedDelayRestart(
                        3, // number of restart attempts
                        Time.of(0, TimeUnit.SECONDS) // delay between attempts
                )
        );

        // Checkpointing: every second, exactly-once semantics.
        env.enableCheckpointing(1000);
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);
        env.getCheckpointConfig().setCheckpointTimeout(60000);
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
        // Keep externalized checkpoints after a manual cancel so the job can be restored.
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        env.getCheckpointConfig().setFailOnCheckpointingErrors(true);

        // State backend: in-memory, 10 MB max state size, asynchronous snapshots.
        env.setStateBackend(new MemoryStateBackend(1024 * 1024 * 10, true));

        // HDFS source with the country reference data, turned into a broadcast stream.
        DataStreamSource<String> hdfsDs = env.addSource(new RichHdfsSourceFunction2("/user/yeniu/data/country_data1"));
        BroadcastStream<String> broadcast = hdfsDs.broadcast(mapDesc);

        // Kafka source: country abbreviations to look up in the broadcast state.
        Properties properties = new Properties();
        // NOTE(review): only one broker is configured; list several
        // ("s1.hadoop:9092,s3.hadoop:9092") so the client can fail over.
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "s1.hadoop:9092");
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "group_33"); // consumer group
        // Re-discover topic partitions every 5 seconds.
        properties.setProperty("flink.partition-discovery.interval-millis", "5000");
        FlinkKafkaConsumer010<String> kafkaSource = new FlinkKafkaConsumer010<>("topic_33", new SimpleStringSchema(), properties);
        kafkaSource.setStartFromLatest();
        DataStreamSource<String> kafkaDs = env.addSource(kafkaSource);

        // Type parameters: <Kafka element, broadcast (HDFS) element, output>.
        kafkaDs.connect(broadcast).process(new BroadcastProcessFunction<String, String, String>() {

            /**
             * Handles one KAFKA record: looks it up in the read-only broadcast state
             * and emits the matched value, or the fallback when the key is absent
             * (e.g. the record arrived before the broadcast state was populated).
             */
            @Override
            public void processElement(String value, ReadOnlyContext ctx, Collector<String> out) throws Exception {
                // FIX: this is the Kafka-side element, not HDFS data — label corrected.
                System.out.println("kafka--》" + value);
                ReadOnlyBroadcastState<String, String> broadcastState = ctx.getBroadcastState(mapDesc);
                // Debug: dump the current broadcast state contents.
                for (Map.Entry<String, String> en : broadcastState.immutableEntries()) {
                    System.out.println(en.getKey() + "-->" + en.getValue());
                }
                String addr = broadcastState.get(value);
                if (addr == null) {
                    // NOTE(review): "unknow" is a typo, kept as-is because it is part
                    // of the job's emitted output — fix together with downstream consumers.
                    addr = "unknow";
                }
                out.collect(addr);
            }

            /**
             * Handles one HDFS (broadcast) record: splits "key\tvalue" and stores it
             * in the broadcast state shared with every parallel instance.
             */
            @Override
            public void processBroadcastElement(String value, Context ctx, Collector<String> out) throws Exception {
                System.out.println("broadcast--》" + value);
                BroadcastState<String, String> broadcastState = ctx.getBroadcastState(mapDesc);
                String[] arr = value.split("\t");
                // FIX: guard against malformed lines without a tab separator, which
                // previously threw ArrayIndexOutOfBoundsException and — under the
                // fixed-delay restart strategy — crash-looped the whole job.
                if (arr.length >= 2) {
                    broadcastState.put(arr[0], arr[1]);
                } else {
                    System.err.println("skip malformed broadcast record: " + value);
                }
            }
        }).print();

        env.execute();
    }
}
