package com.nepu.gmall.realtime.app.dws;

import com.alibaba.fastjson.JSON;
import com.nepu.gmall.realtime.bean.UserRegisterBean;
import com.nepu.gmall.realtime.util.ClickHouseUtil;
import com.nepu.gmall.realtime.util.DateFormatUtil;
import com.nepu.gmall.realtime.util.KafkaUtils;
import org.apache.flink.api.common.eventtime.SerializableTimestampAssigner;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.windowing.AllWindowFunction;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
import org.apache.flink.util.Collector;

import java.time.Duration;

/**
 * 用户域用户注册各窗口汇总表
 * 从 DWD 层用户注册表中读取数据，统计各窗口注册用户数，写入 ClickHouse。
 *
 * 因为我们dwd层已经做过用户注册的事实表，所以直接从中读取数据，然后开窗聚合就可以了
 * （1）加载执行环境
 * （2）从kafka的dwd_user_register读取数据
 * （3）转换数据结构
 * （4）提取事件时间，生成watermark
 * （5）开窗、聚合
 * （6）将数据写入到clickHouse中
 *
 * 数据的流向
 * mock --> mysql --> maxwell --> kafka(topic_db) --> DwdUserRegister.class --> kafka --> DwsUserUserRegisterWindow.class --> clickHouse
 * @author chenshuaijun
 * @create 2023-03-01 20:29
 */
public class DwsUserUserRegisterWindow {

    /**
     * Job entry point.
     *
     * <p>Reads user-registration records from the Kafka topic {@code dwd_user_register},
     * counts registrations per 10-second event-time tumbling window, and writes the
     * per-window totals to the ClickHouse table {@code dws_user_user_register_window}.
     *
     * @param args unused command-line arguments
     * @throws Exception if the Flink job fails to submit or execute
     */
    public static void main(String[] args) throws Exception {
        // TODO 1. Set up the stream execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // In production the parallelism must never be hard-coded to 1; it should match
        // the partition count of the Kafka topic consumed below. 1 is for local testing.
        env.setParallelism(1);
        // Production checkpoint configuration (disabled for local runs):
        /*// Checkpoint every 5 minutes with exactly-once semantics.
        env.enableCheckpointing(5 * 60000L, CheckpointingMode.EXACTLY_ONCE);
        // Checkpoint timeout: 10 minutes.
        env.getCheckpointConfig().setCheckpointTimeout(10 * 60000L);
        // Externalized checkpoints: retain checkpoint metadata on cancellation so a
        // failed job can be restored from the last checkpoint.
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        // Restart strategy: at most 10 failures per day, 3 minutes between attempts.
        env.setRestartStrategy(RestartStrategies.failureRateRestart(10, Time.of(1L, TimeUnit.DAYS), Time.of(3L, TimeUnit.MINUTES)));
        // Minimum pause between two checkpoints.
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3000L);
        // State backend: in-memory hash map.
        env.setStateBackend(new HashMapStateBackend());
        // Checkpoint storage location on HDFS.
        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop102:8020/checkpoint");
        // Only the atguigu user may operate on this HDFS cluster.
        System.setProperty("HADOOP_USER_NAME", "atguigu");*/

        // TODO 2. Read the DWD user-registration stream from Kafka.
        String topic = "dwd_user_register";
        DataStreamSource<String> kafkaSourceStream = env.addSource(KafkaUtils.getKafkaConsumer(topic, "DwsUserUserRegisterWindow"));
        // TODO 3. Convert each JSON record into a UserRegisterBean carrying a count of 1.
        SingleOutputStreamOperator<UserRegisterBean> mapDataStream = kafkaSourceStream.map(new MapFunction<String, UserRegisterBean>() {
            @Override
            public UserRegisterBean map(String value) throws Exception {
                // NOTE(review): assumes every record is valid JSON with a "create_time"
                // field — malformed input will fail the task. Confirm upstream guarantees.
                String createTime = JSON.parseObject(value).getString("create_time");
                Long ts = DateFormatUtil.toTs(createTime, true);
                // stt/edt are filled in later by the window function.
                return new UserRegisterBean("", "", 1L, ts);
            }
        });
        // TODO 4. Extract the event time and generate a watermark that tolerates
        // up to 2 seconds of out-of-order data.
        SingleOutputStreamOperator<UserRegisterBean> watermarkStream = mapDataStream.assignTimestampsAndWatermarks(WatermarkStrategy.<UserRegisterBean>forBoundedOutOfOrderness(Duration.ofSeconds(2)).withTimestampAssigner(new SerializableTimestampAssigner<UserRegisterBean>() {
            @Override
            public long extractTimestamp(UserRegisterBean element, long recordTimestamp) {
                return element.getTs();
            }
        }));
        // TODO 5. Open a 10-second event-time tumbling window and sum the counts.
        // BUG FIX: the original used TumblingProcessingTimeWindows, which silently
        // ignores the watermark assigned above; event-time windowing is what the
        // timestamps/watermarks were set up for.
        SingleOutputStreamOperator<UserRegisterBean> reduceStream = watermarkStream.windowAll(TumblingEventTimeWindows.of(Time.seconds(10)))
                .reduce(new ReduceFunction<UserRegisterBean>() {
                    @Override
                    public UserRegisterBean reduce(UserRegisterBean value1, UserRegisterBean value2) throws Exception {
                        // Incrementally accumulate the registration count.
                        value1.setRegisterCt(value1.getRegisterCt() + value2.getRegisterCt());
                        return value1;
                    }
                }, new AllWindowFunction<UserRegisterBean, UserRegisterBean, TimeWindow>() {
                    @Override
                    public void apply(TimeWindow window, Iterable<UserRegisterBean> values, Collector<UserRegisterBean> out) throws Exception {
                        // The reduce above leaves exactly one aggregated bean per window;
                        // stamp it with the window bounds and the emission time.
                        UserRegisterBean next = values.iterator().next();
                        next.setStt(DateFormatUtil.toYmdHms(window.getStart()));
                        next.setEdt(DateFormatUtil.toYmdHms(window.getEnd()));
                        next.setTs(System.currentTimeMillis());
                        out.collect(next);
                    }
                });

        reduceStream.print(">>>>>>>>>>>>>>>>");
        // TODO 6. Write the aggregated rows to ClickHouse.
        reduceStream.addSink(ClickHouseUtil.getJdbcSink("insert into dws_user_user_register_window values(?,?,?,?)"));

        // TODO 7. Submit the job.
        env.execute("DwsUserUserRegisterWindow");

    }
}
