package com.zhu.app.dwd;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.zhu.utils.ZhuKafkaUtil;
import org.apache.flink.api.common.eventtime.SerializableTimestampAssigner;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.cep.CEP;
import org.apache.flink.cep.PatternSelectFunction;
import org.apache.flink.cep.PatternStream;
import org.apache.flink.cep.PatternTimeoutFunction;
import org.apache.flink.cep.pattern.Pattern;
import org.apache.flink.cep.pattern.conditions.SimpleCondition;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.OutputTag;


import java.time.Duration;
import java.util.List;
import java.util.Map;


/**
 * Why the consumer group id differs from other apps: consumers within the
 * same group receive only one copy of the topic's data, so each downstream
 * application needs its own group to read the full stream.
 */
public class DWDTrafficUserJumpDetailApp {

    public static void main(String[] args) throws Exception {

        // TODO 1. Execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1); // test only; in production match the Kafka topic's partition count (4)

        // Checkpointing — enable in production:
        /*
        env.enableCheckpointing(5 * 60000L, CheckpointingMode.EXACTLY_ONCE); // exactly-once

        // State backend
        env.setStateBackend(new HashMapStateBackend());
        env.getCheckpointConfig().setCheckpointStorage(ClusterParametersConfig.HDFS_CHECKPOINT_FILE_DIR); // store checkpoints on HDFS
        System.setProperty("HADOOP_USER_NAME", "zhu");
        env.getCheckpointConfig().setCheckpointTimeout(10 * 60000L); // checkpoint timeout
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(2); // max concurrent checkpoints
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 5 * 1000L)); // restart strategy
         */

        // TODO 2. Consume the DWD page-log topic from Kafka
        String sourceTopic = "dwd_traffic_page_log";
        String consumerGroup = "user_jump_detail_zhu_2023";
        DataStreamSource<String> sourceStream =
                env.addSource(ZhuKafkaUtil.getFlinkKafkaConsumer(sourceTopic, consumerGroup));

        // TODO 3. Parse each raw record into a JSONObject
        SingleOutputStreamOperator<JSONObject> jsonStream = sourceStream.map(JSON::parseObject);

        // TODO 4. Assign event-time watermarks (2 s out-of-orderness), then key by device id (mid)
        KeyedStream<JSONObject, String> keyedByMid = jsonStream
                .assignTimestampsAndWatermarks(
                        WatermarkStrategy.<JSONObject>forBoundedOutOfOrderness(Duration.ofSeconds(2))
                                .withTimestampAssigner(new SerializableTimestampAssigner<JSONObject>() {
                                    @Override
                                    public long extractTimestamp(JSONObject record, long previousTs) {
                                        // NOTE(review): assumes every record carries a non-null "ts";
                                        // a missing value would NPE on unboxing — confirm upstream guarantees.
                                        return record.getLong("ts");
                                    }
                                }))
                .keyBy(record -> record.getJSONObject("common").getString("mid"));

        // TODO 5. CEP pattern: two consecutive session-entry pages (last_page_id == null)
        // within 10 seconds. Strict contiguity (next) is required here — the default
        // relaxed contiguity would let intermediate pages slip between the two events.
        Pattern<JSONObject, JSONObject> jumpPattern = Pattern
                .<JSONObject>begin("start")
                .where(new SimpleCondition<JSONObject>() {
                    @Override
                    public boolean filter(JSONObject record) throws Exception {
                        return record.getJSONObject("page").getString("last_page_id") == null;
                    }
                })
                .next("next")
                .where(new SimpleCondition<JSONObject>() {
                    @Override
                    public boolean filter(JSONObject record) throws Exception {
                        return record.getJSONObject("page").getString("last_page_id") == null;
                    }
                })
                .within(Time.seconds(10)); // 10 s match window; event time handles out-of-order data

        // TODO 6. Apply the pattern to the keyed stream
        PatternStream<JSONObject> patternStream = CEP.pattern(keyedByMid, jumpPattern);

        // TODO 7. Select matches; timed-out partial matches go to a side output
        OutputTag<String> timeoutTag = new OutputTag<String>("timeOut") {};
        SingleOutputStreamOperator<String> matchedStream = patternStream.select(
                timeoutTag,
                new PatternTimeoutFunction<JSONObject, String>() {
                    // Timed out: an entry page with no follow-up event inside the window — a jump.
                    @Override
                    public String timeout(Map<String, List<JSONObject>> match, long timeoutTs) throws Exception {
                        return match.get("start").get(0).toJSONString();
                    }
                },
                new PatternSelectFunction<JSONObject, String>() {
                    // Matched: two consecutive entry pages — the first one is the jump.
                    @Override
                    public String select(Map<String, List<JSONObject>> match) throws Exception {
                        return match.get("start").get(0).toJSONString();
                    }
                });
        DataStream<String> timedOutStream = matchedStream.getSideOutput(timeoutTag);

        // TODO 8. Union both kinds of jump records into one stream
        DataStream<String> jumpDetailStream = matchedStream.union(timedOutStream);

        // TODO 9. Sink to Kafka (prints kept for local debugging)
        matchedStream.print("Select>>>>>");
        timedOutStream.print("TimeOut>>>>>");
        String sinkTopic = "dwd_traffic_user_jump_detail";
        jumpDetailStream.addSink(ZhuKafkaUtil.getFlinkKafkaProducer(sinkTopic));

        // TODO 10. Launch the job
        env.execute("dwd_traffic_user_jump");
    }
}
