package com.xinqing.bigdata.flink.datastream.lateness;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.xinqing.bigdata.flink.datastream.model.UserCostInfo;
import com.xinqing.bigdata.flink.datastream.sink.Kafka2KuduWithWaterMark;
import org.apache.flink.api.common.eventtime.SerializableTimestampAssigner;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.windowing.ProcessWindowFunction;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.util.Collector;
import org.apache.flink.util.OutputTag;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.Properties;

/**
 * Demonstrates handling of late events in Flink event-time windows: records arriving
 * after {@code windowEnd + allowedLateness} are routed to a side output and written
 * back to Kafka as JSON instead of being silently dropped.
 *
 * @Author:CHQ
 * @Date:2021/5/26 15:04
 * @Description
 */
public class SideOutTest {

    // Logger bound to this class. NOTE(fix): the original bound it to
    // Kafka2KuduWithWaterMark.class, so log lines reported the wrong origin.
    private static final Logger LOG = LoggerFactory.getLogger(SideOutTest.class);

    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration()); // pin the Web-UI port
        // executionConfig.setAutoWatermarkInterval(200); // emit watermarks periodically rather than per record, to avoid the per-record overhead
        env.setParallelism(1);
        // env.setStateBackend(new FsStateBackend("hdfs://10.201.7.114:8020/data_team/chq/flink2/"));  // deprecated API

        CheckpointConfig checkpointConfig = env.getCheckpointConfig();
        // Checkpoint storage on HDFS — prerequisite for enabling checkpointing; this is the state backend location.
        checkpointConfig.setCheckpointStorage("hdfs://10.201.7.114:8020/data_team/chq/flink2/");
        checkpointConfig.setCheckpointInterval(30000); // take a checkpoint every 30s
        checkpointConfig.setCheckpointTimeout(60000); // abandon any checkpoint that takes longer than 60s
        checkpointConfig.setMaxConcurrentCheckpoints(1); // at most one in-flight checkpoint at a time
        checkpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // Retain externalized checkpoints when the job is cancelled (e.g. cluster outage); the default deletes them.
        checkpointConfig.enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        // Kafka consumer configuration.
        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", "10.201.7.175:9092");
        properties.setProperty("group.id", "chq1");
        properties.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        // properties.setProperty("auto.offset.reset", "earliest");
        properties.setProperty("auto.offset.reset", "latest");

        // Tag for records that arrive after windowEnd + allowedLateness has passed the watermark.
        OutputTag<UserCostInfo> tag = new OutputTag<UserCostInfo>("late_data_output_tag") {
        };

        SingleOutputStreamOperator<Tuple2<String, Long>> process = env
                .addSource(new FlinkKafkaConsumer<>("cost3", new SimpleStringSchema(), properties))
                // Parse each JSON record into a UserCostInfo(sid, money, timestamp).
                .map((MapFunction<String, UserCostInfo>) value -> {
                    JSONObject jsonObject = JSON.parseObject(value);
                    String sid = jsonObject.getString("sid");
                    Double money = jsonObject.getDouble("money");
                    Long timestamp = jsonObject.getLong("timestamp");
                    return new UserCostInfo(sid, money, timestamp);
                })
                // Event-time tumbling windows require watermarks; tolerate events up to 2s out of order.
                .assignTimestampsAndWatermarks(WatermarkStrategy.<UserCostInfo>forBoundedOutOfOrderness(Duration.ofSeconds(2))
                        .withTimestampAssigner((SerializableTimestampAssigner<UserCostInfo>) (element, recordTimestamp) -> element.getTimestamp()))
                .keyBy((KeySelector<UserCostInfo, String>) UserCostInfo::getSid)
                .window(TumblingEventTimeWindows.of(Time.seconds(10)))
                .allowedLateness(Time.seconds(2))
                .sideOutputLateData(tag)
                .process(new ProcessWindowFunction<UserCostInfo, Tuple2<String, Long>, String, TimeWindow>() {
                    @Override
                    public void process(String s, Context context, Iterable<UserCostInfo> elements, Collector<Tuple2<String, Long>> out) throws Exception {
                        LOG.info("关窗开始时间================>{}", context.window().getStart());
                        for (UserCostInfo element : elements) {
                            // At this point the watermark equals the timestamp of the record that
                            // triggered the window close, minus the out-of-orderness bound.
                            LOG.info("元素信息===============>{}", element);
                            out.collect(new Tuple2<>(element.getSid(), element.getTimestamp()));
                        }
                        LOG.info("关窗结束时间================>{}", context.window().getEnd());
                    }
                });

        // Emit late data (windowEnd + lateness <= currentWatermark) to Kafka as a JSON string.
        // UTF-8 is specified explicitly: the bare getBytes() used the platform charset, which
        // would mis-encode non-ASCII payload fields on some hosts.
        process.getSideOutput(tag).addSink(new FlinkKafkaProducer<>("10.201.7.175:9092", "cost_late_data",
                (SerializationSchema<UserCostInfo>) element -> JSON.toJSONString(element).getBytes(StandardCharsets.UTF_8)));

        env.execute();
    }
}
