package com.atguigu.edu.realtime.app.dwd;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.edu.realtime.common.Constant;
import com.atguigu.edu.realtime.utils.AtguiguUtil;
import com.atguigu.edu.realtime.utils.FlinkSinkUtil;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.RichFilterFunction;
import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
import org.apache.flink.util.Collector;
import org.apache.kafka.clients.producer.ProducerRecord;

import javax.annotation.Nullable;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Properties;

import static org.apache.flink.streaming.api.environment.CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION;

/**
 * Unique-visitor transaction fact table:
 * reads page-log records from the dwd_traffic_page topic and deduplicates visitors,
 * keeping only the first page view per visitor (uid) per day.
 */
public class Dwd_TrafficLogUniqueVisitor_Deduplication {

    public static final String ckAndGroupIdAndJobName = "Dwd_TrafficLogUniqueVisitor_Deduplication";
    public static final String source_topic = Constant.TOPIC_DWD_TRAFFIC_PAGE;
    public static final Integer port = 3002;
    // TODO: later, make the parallelism a command-line argument instead of a constant
    public static final Integer parallelism = 2;

    /**
     * Entry point: reads page logs from Kafka, keeps the first visit of each
     * uid per day (unique-visitor deduplication via keyed ValueState), and
     * writes the surviving records back to Kafka with exactly-once semantics.
     *
     * @param args unused
     * @throws Exception if job submission/execution fails
     */
    public static void main(String[] args) throws Exception {
        // 1. Set the HDFS user; without this, checkpointing to HDFS fails with a
        //    permission error when launched from Windows.
        System.setProperty("HADOOP_USER_NAME", "atguigu");

        // 2. Build the stream execution environment.
        Configuration configuration = new Configuration();
        configuration.setString("pipeline.name", ckAndGroupIdAndJobName);
        configuration.setInteger("rest.port", port);
        // BUGFIX: the Configuration was previously created but never passed to
        // getExecutionEnvironment(), so pipeline.name / rest.port had no effect.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(configuration);
        env.setParallelism(parallelism);
        // Checkpointing: state backend, interval, mode, storage location, timeout,
        // min pause, and retention-on-cancel policy.
        env.setStateBackend(new HashMapStateBackend());
        env.enableCheckpointing(3000);
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop162:8020/edu/" + ckAndGroupIdAndJobName);
        env.getCheckpointConfig().setCheckpointTimeout(60 * 1000);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);
        env.getCheckpointConfig().setExternalizedCheckpointCleanup(RETAIN_ON_CANCELLATION);

        // 3. Read the dwd_traffic_page records from Kafka.
        DataStreamSource<String> pageStrKafkaDs = env.fromSource(
                // [arg 1] the KafkaSource, built via the builder pattern
                KafkaSource.<String>builder()
                        .setBootstrapServers(Constant.KAFKA_BROKERS)
                        .setTopics(source_topic)
                        .setGroupId(ckAndGroupIdAndJobName)
                        .setStartingOffsets(OffsetsInitializer.latest())
                        // Custom deserializer that tolerates null payloads
                        // (tombstones, e.g. written upstream by a left-join retraction).
                        .setValueOnlyDeserializer(new DeserializationSchema<String>() {
                            @Override
                            public String deserialize(byte[] message) throws IOException {
                                if (message != null) {
                                    return new String(message, StandardCharsets.UTF_8);
                                }
                                return null; // returning null drops the record from the stream
                            }

                            @Override
                            public boolean isEndOfStream(String s) {
                                return false; // unbounded stream: never ends
                            }

                            @Override
                            public TypeInformation<String> getProducedType() {
                                // Type of the elements this deserializer produces.
                                return TypeInformation.of(new TypeHint<String>() {
                                });
                            }
                        })
                        // Only read committed records (upstream writes transactionally).
                        .setProperty("isolation.level", "read_committed")
                        .build()

                // [arg 2] no event-time watermarks needed for this job
                , WatermarkStrategy.noWatermarks()

                // [arg 3] source name, for the UI only
                , "kafka-source"
        );

        // 4. Parse each JSON string into a JSONObject for downstream operators.
        SingleOutputStreamOperator<JSONObject> pageObjKafkaDs = pageStrKafkaDs.map(jsonStr -> JSON.parseObject(jsonStr));

        // 5. Key by uid so each visitor gets its own deduplication state.
        KeyedStream<JSONObject, String> keyByDs = pageObjKafkaDs.keyBy(
                jsonObject -> jsonObject.getJSONObject("common").getString("uid")
        );

        // 6. Keep only the first page view of each visitor per day.
        SingleOutputStreamOperator<JSONObject> processDs = keyByDs.process(new KeyedProcessFunction<String, JSONObject, JSONObject>() {

            // Last visit date seen for the current key (format produced by AtguiguUtil.tsToDate).
            private ValueState<String> lastVisitDt;

            @Override
            public void open(Configuration parameters) throws Exception {
                // Create the state handle once per operator instance,
                // not on every element as before.
                lastVisitDt = getRuntimeContext().getState(
                        new ValueStateDescriptor<>("last_visit_dt", String.class)
                );
            }

            @Override
            public void processElement(JSONObject jsonObject, Context context, Collector<JSONObject> out) throws Exception {
                Long ts = jsonObject.getLong("ts");
                String visitDt = AtguiguUtil.tsToDate(ts);

                // BUGFIX: compare the *value* stored in state, not the ValueState
                // handle itself. The old code compared the ValueState object against
                // the date string, which was always unequal, so every record was
                // emitted and nothing was actually deduplicated.
                String lastDt = lastVisitDt.value();
                // State is null on the first visit; a different date means a new day.
                if (lastDt == null || !lastDt.equals(visitDt)) {
                    lastVisitDt.update(visitDt);
                    out.collect(jsonObject);
                }
            }
        });

        // 7. Write the deduplicated records back to Kafka.
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", Constant.KAFKA_BROKERS);
        // The broker's transaction.max.timeout.ms defaults to 15 minutes; Flink's
        // producer default (1 hour) exceeds it, so cap transaction.timeout.ms here.
        props.setProperty("transaction.timeout.ms", 15 * 60 * 1000 + "");

        processDs.map(jsonObject -> jsonObject.toJSONString())
                .addSink(
                        // Exactly-once producer into the dwd_traffic_unique_visitor topic.
                        new FlinkKafkaProducer<String>(
                                // fallback topic; the serializer below always sets the real topic
                                "default",
                                new KafkaSerializationSchema<String>() {
                                    @Override
                                    public ProducerRecord<byte[], byte[]> serialize(String element, @Nullable Long aLong) {
                                        return new ProducerRecord<>(Constant.TOPIC_DWD_TRAFFIC_UNIQUE_VISITOR, element.getBytes(StandardCharsets.UTF_8));
                                    }
                                },

                                props,

                                FlinkKafkaProducer.Semantic.EXACTLY_ONCE
                        )
                );

        // 8. Submit and run the job. Propagate failures instead of swallowing
        //    them with printStackTrace(), so a failed submission is visible.
        env.execute(ckAndGroupIdAndJobName);
    }

}
