package com.atguigu.edu.realtime.app.dwd;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.edu.realtime.common.Constant;
import com.google.gson.JsonObject;
import org.apache.flink.api.common.eventtime.SerializableTimestampAssigner;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
import org.apache.flink.util.Collector;
import org.apache.kafka.clients.producer.ProducerRecord;

import javax.annotation.Nullable;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.Properties;

import static org.apache.flink.streaming.api.environment.CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION;


/**
     User-jump (bounce) transaction fact table:
     reads page-log records from dwd_traffic_page_log and detects sessions that
     viewed only a single page within the timeout (a "jump"), emitting them downstream.
     //todo improvement pending — revisit with a Flink CEP based implementation
 */
public class Dwd_TrafficLogUserJump {

    public static final String ckAndGroupIdAndJobName = "Dwd_TrafficLogUserJump";
    public static final String source_topic = Constant.TOPIC_DWD_TRAFFIC_PAGE;
    public static final Integer port = 3003;
    // TODO: could later be supplied as a program argument instead of hard-coding
    public static final Integer parallelism = 2;

    /**
     * Entry point. Reads the dwd_traffic_page topic, keys the stream by session id
     * (common.sid) and uses keyed state + a processing-time timer to detect
     * single-page ("jump") sessions, which are written to the
     * dwd_traffic_user_jump Kafka topic with exactly-once semantics.
     *
     * @param args unused
     * @throws Exception if job submission/execution fails — propagated instead of
     *                   being swallowed so a failed submit does not look like success
     */
    public static void main(String[] args) throws Exception {
        // 1. HDFS user for writing checkpoints
        System.setProperty("HADOOP_USER_NAME", "atguigu");

        // 2. Stream execution environment + checkpointing
        Configuration configuration = new Configuration();
        configuration.setString("pipeline.name", ckAndGroupIdAndJobName);
        configuration.setInteger("rest.port", port);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(configuration);
        env.setParallelism(parallelism);    // keep in line with the Kafka partition count
        env.setStateBackend(new HashMapStateBackend());
        env.enableCheckpointing(3000);      // checkpoint every 3 s
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop162:8020/edu/" + ckAndGroupIdAndJobName);
        env.getCheckpointConfig().setCheckpointTimeout(60 * 1000);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);
        env.getCheckpointConfig().setExternalizedCheckpointCleanup(RETAIN_ON_CANCELLATION);

        // 3. Kafka source for the dwd_traffic_page topic
        DataStreamSource<String> trafficPageSource = env.fromSource(
                KafkaSource.<String>builder()
                        .setBootstrapServers(Constant.KAFKA_BROKERS)
                        .setTopics(source_topic)
                        .setGroupId(ckAndGroupIdAndJobName)
                        .setStartingOffsets(OffsetsInitializer.latest())
                        .setValueOnlyDeserializer(new DeserializationSchema<String>() {
                            @Override
                            public String deserialize(byte[] message) throws IOException {
                                // tolerate tombstone/null records
                                if (message != null) {
                                    return new String(message, StandardCharsets.UTF_8);
                                }
                                return null;
                            }

                            @Override
                            public boolean isEndOfStream(String s) {
                                return false;
                            }

                            @Override
                            public TypeInformation<String> getProducedType() {
                                return TypeInformation.of(new TypeHint<String>() {
                                });
                            }
                        })
                        // read only committed records so upstream exactly-once is honored
                        .setProperty("isolation.level", "read_committed")
                        .build()

                , WatermarkStrategy.noWatermarks()

                , "kafka-source"
        );

        /*
            4.
                (1) parse JSON string -> JSONObject
                (2) assign watermarks (5 s bounded out-of-orderness on "ts")
                (3) key by session id: common.sid
         */
        KeyedStream<JSONObject, String> keyedStream =
                trafficPageSource
                .map(JSON::parseObject) // (1)
                .assignTimestampsAndWatermarks(
                        WatermarkStrategy.<JSONObject>forBoundedOutOfOrderness(Duration.ofSeconds(5))
                                .withTimestampAssigner(new SerializableTimestampAssigner<JSONObject>() {
                                    @Override
                                    public long extractTimestamp(JSONObject jsonObject, long l) {
                                        return jsonObject.getLong("ts");
                                    }
                                })
                ) // (2)
                .keyBy(jsonObject -> jsonObject.getJSONObject("common").getString("sid")); // (3)

        /*
            5. Detect jump sessions with keyed state + a processing-time timer.
               A session that produced exactly one page event within the 10 s
               timeout is considered a "jump"; its single page record is emitted.

               TODO: this approach still has known limitations (processing-time
               based, out-of-order data) — revisit with a Flink CEP implementation.

               BUG FIX vs. previous version: the old code never reacted to the
               second page of a session, so the timer always fired and every
               session's first page was emitted as a jump. Now the second page
               clears the stored candidate, and onTimer always clears state so
               per-sid state does not grow without bound.
         */
        SingleOutputStreamOperator<JSONObject> processDS = keyedStream.process(new KeyedProcessFunction<String, JSONObject, JSONObject>() {
            // candidate jump record: the first page event seen for this sid
            private ValueState<JSONObject> lastObjectState;
            // timestamp of the registered timer; non-null marks "session already started"
            private ValueState<Long> timerTsState;

            @Override
            public void open(Configuration parameters) throws Exception {
                lastObjectState = getRuntimeContext().getState(new ValueStateDescriptor<>("jumpObjectState", JSONObject.class));
                timerTsState = getRuntimeContext().getState(new ValueStateDescriptor<>("jumpTimerTsState", Long.class));
            }

            @Override
            public void processElement(JSONObject jsonObject, Context context, Collector<JSONObject> out) throws Exception {
                if (timerTsState.value() == null) {
                    // first page of this session: store it as a jump candidate
                    // and wait 10 s of processing time before deciding
                    lastObjectState.update(jsonObject);
                    long timerTs = context.timerService().currentProcessingTime() + 10000L;
                    timerTsState.update(timerTs);
                    context.timerService().registerProcessingTimeTimer(timerTs);
                } else {
                    // a further page arrived before the timer fired: NOT a jump.
                    // Drop the candidate; the timer still fires but emits nothing
                    // and performs the state cleanup.
                    lastObjectState.clear();
                }
            }

            @Override
            public void onTimer(long timestamp, OnTimerContext ctx, Collector<JSONObject> out) throws Exception {
                JSONObject candidate = lastObjectState.value();
                if (candidate != null) {
                    // only one page was seen within the timeout -> jump session
                    out.collect(candidate);
                }
                // always release per-sid state once the session is decided
                lastObjectState.clear();
                timerTsState.clear();
            }
        });

        // 6. Sink jump records to the dwd_traffic_user_jump Kafka topic
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", Constant.KAFKA_BROKERS);
        // broker-side transaction.max.timeout.ms defaults to 15 min; the producer's
        // transaction.timeout.ms (Flink default: 60 min) must not exceed it
        props.setProperty("transaction.timeout.ms", 15 * 60 * 1000 + "");
        processDS.print(">>>>>>>>>>>>>>");
        processDS.map(JSONObject::toJSONString)
                .addSink(
                        // exactly-once producer; actual topic is set per-record in serialize()
                        new FlinkKafkaProducer<String>(
                                "default",
                                new KafkaSerializationSchema<String>() {
                                    @Override
                                    public ProducerRecord<byte[], byte[]> serialize(String element, @Nullable Long aLong) {
                                        return new ProducerRecord<>(Constant.TOPIC_DWD_TRAFFIC_USER_JUMP, element.getBytes(StandardCharsets.UTF_8));
                                    }
                                },

                                props,

                                FlinkKafkaProducer.Semantic.EXACTLY_ONCE
                        )

                );

        // propagate failures instead of swallowing them with printStackTrace()
        env.execute();
    }

}
