package com.atguigu.app.dwm;

import com.alibaba.fastjson.JSONAware;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.common.DWMStaticConstants;
import com.atguigu.utils.KafkaUtil;
import org.apache.flink.api.common.eventtime.SerializableTimestampAssigner;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.cep.CEP;
import org.apache.flink.cep.PatternSelectFunction;
import org.apache.flink.cep.PatternStream;
import org.apache.flink.cep.PatternTimeoutFunction;
import org.apache.flink.cep.pattern.Pattern;
import org.apache.flink.cep.pattern.conditions.IterativeCondition;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.OutputTag;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.time.Duration;
import java.util.List;
import java.util.Map;

/**
 * @Author:GaoFei
 * @Description:
 * @Date:Created in 18:11
 * @Modified By:
 */
/**
 * DWM-layer Flink job that extracts "jump-out" visits (a user lands on a page
 * and leaves without viewing a second page) from the DWD page log and writes
 * them back to Kafka as user-jump detail records.
 *
 * <p>Detection strategy (CEP, keyed by device id {@code common.mid}):
 * an event with no {@code page.last_page_id} marks the start of a visit.
 * <ul>
 *   <li>Two such entries back-to-back within 10s: the first visit had no
 *       follow-up page view, i.e. it was a jump-out (full pattern match).</li>
 *   <li>A single entry with nothing following within 10s: also a jump-out,
 *       surfaced through the pattern-timeout side output.</li>
 * </ul>
 * Both streams are unioned and sinked to the jump-detail topic.
 */
public class UserJumpApp {

    // NOTE(review): logger name was the copy-pasted "uvLog" from the UV job;
    // use the class so output is attributed to this job. Currently unused but
    // kept for future diagnostics.
    private static final Logger logger = LoggerFactory.getLogger(UserJumpApp.class);

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Keep parallelism aligned with the Kafka topic's partition count.
        env.setParallelism(1);

        /*
         * Production checkpoint settings (disabled during development to avoid
         * the HDFS dependency):
         *   System.setProperty("HADOOP_USER_NAME", "atguigu");
         *   env.setStateBackend(new FsStateBackend(OdsStaticConstants.CHECK_POINT_URL));
         *   env.enableCheckpointing(5000L);                                       // checkpoint interval
         *   env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
         *   env.getCheckpointConfig().setCheckpointTimeout(10000L);               // checkpoint timeout
         *   env.getCheckpointConfig().setMaxConcurrentCheckpoints(2);             // at most 2 in flight
         *   env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3000L);       // min pause between two
         */

        // Raw page-log records (JSON strings) from the DWD source topic.
        DataStreamSource<String> dataStreamSource =
                env.addSource(KafkaUtil.getKafkaConsumer(DWMStaticConstants.DWM_SOURCE_TOPIC,
                        DWMStaticConstants.DWM_USER_JUMP_GROUP_ID));

        // Parse to JSON and assign event-time watermarks from the "ts" field,
        // tolerating up to 2 seconds of out-of-orderness.
        DataStream<JSONObject> jsonStream = dataStreamSource
                .map(JSONObject::parseObject)
                .assignTimestampsAndWatermarks(WatermarkStrategy
                        .<JSONObject>forBoundedOutOfOrderness(Duration.ofSeconds(2))
                        .withTimestampAssigner((element, recordTimestamp) -> element.getLong("ts")));

        // Pattern: two consecutive visit-entry events (no last_page_id) within 10s.
        Pattern<JSONObject, JSONObject> pattern = Pattern
                .<JSONObject>begin("start")
                .where(new IterativeCondition<JSONObject>() {
                    @Override
                    public boolean filter(JSONObject jsonObject, Context<JSONObject> context) throws Exception {
                        // An absent/empty last_page_id means this page view starts a new visit.
                        String lastPageId = jsonObject.getJSONObject("page").getString("last_page_id");
                        return lastPageId == null || lastPageId.isEmpty();
                    }
                })
                .times(2).consecutive()
                .within(Time.seconds(10));

        // Apply the pattern per device (common.mid).
        PatternStream<JSONObject> patternStream = CEP.pattern(
                jsonStream.keyBy(json -> json.getJSONObject("common").getString("mid")),
                pattern);

        // Timed-out partial matches (a lone entry with no follow-up inside the
        // window) are emitted to this side output — they are jump-outs too.
        OutputTag<JSONObject> timeoutTag = new OutputTag<JSONObject>("late") {};

        // Use explicitly typed anonymous functions instead of raw-typed lambdas so
        // Flink can resolve the output TypeInformation (the raw types previously
        // defeated the generic checks on select()).
        SingleOutputStreamOperator<JSONObject> selected = patternStream.select(
                timeoutTag,
                new PatternTimeoutFunction<JSONObject, JSONObject>() {
                    @Override
                    public JSONObject timeout(Map<String, List<JSONObject>> match, long timeoutTimestamp) {
                        return match.get("start").get(0);
                    }
                },
                new PatternSelectFunction<JSONObject, JSONObject>() {
                    @Override
                    public JSONObject select(Map<String, List<JSONObject>> match) {
                        // The first of the two consecutive entries is the jump-out visit.
                        return match.get("start").get(0);
                    }
                });

        // Both full matches and timeouts represent jump-out visits; merge them.
        DataStream<JSONObject> timeoutStream = selected.getSideOutput(timeoutTag);
        DataStream<JSONObject> jumpDetailStream = timeoutStream.union(selected);

        jumpDetailStream
                .map(JSONAware::toJSONString)
                .addSink(KafkaUtil.getKafkaProducter(DWMStaticConstants.DWM_USER_JUMP_DETAIL_TOPIC));
        jumpDetailStream.print(">>>>>>");

        env.execute();
    }
}

