package com.atguigu.gmall.realtime.app.dws;

import com.atguigu.gmall.realtime.bean.ProvinceStats;
import com.atguigu.gmall.realtime.utils.ClickhouseUtils;
import com.atguigu.gmall.realtime.utils.KafkaUtils;
import lombok.extern.slf4j.Slf4j;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Province-dimension statistics (implemented with Flink SQL).
 *
 * <p>Reads the order-wide stream ({@code dwm_order_wide}) from Kafka, performs a
 * 10-second tumbling event-time window aggregation per province (distinct order
 * count and order amount), converts the result to an append-only stream and
 * writes it to ClickHouse table {@code province_stats_2021}.
 *
 * @author lvbingbing
 * @date 2022-07-05 22:04
 */
@Slf4j
public class ProvinceStatsApp {
    public static void main(String[] args) throws Exception {
        // 1. Stream execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(4);
        // 2. Checkpoint-related settings.
        // 2.1 Enable exactly-once checkpointing every 6 s.
        env.enableCheckpointing(6000L, CheckpointingMode.EXACTLY_ONCE);
        // 2.2 A checkpoint that takes longer than 60 s is aborted.
        CheckpointConfig checkpointConfig = env.getCheckpointConfig();
        checkpointConfig.setCheckpointTimeout(60000L);
        // 2.3 Externalized checkpoints: retain them when the job is cancelled
        //     so the job can be restored manually afterwards.
        checkpointConfig.enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        // 2.4 Restart strategy: up to 3 attempts, 6 s apart.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 6000L));
        // 2.5 Set the HDFS user BEFORE configuring the HDFS-backed state backend,
        //     so every HDFS access (checkpoint dir creation included) uses it.
        System.setProperty("HADOOP_USER_NAME", "atguigu");
        // 2.6 Keep checkpoint state in HDFS.
        env.setStateBackend(new FsStateBackend("hdfs://hadoop102:8020/ck/gmall"));
        // 3. Table environment (Blink planner, streaming mode).
        EnvironmentSettings settings = EnvironmentSettings.newInstance()
                .useBlinkPlanner()
                .inStreamingMode()
                .build();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env, settings);
        // 4. Declare the Kafka source topic as a dynamic table. The event-time
        //    column is derived from create_time with a 3 s out-of-orderness bound.
        String groupId = "province_stats_group";
        String orderWideTopic = "dwm_order_wide";
        tableEnv.executeSql(
                "CREATE TABLE ORDER_WIDE (" +
                        " province_id BIGINT, " +
                        " province_name STRING, " +
                        " province_area_code STRING, " +
                        " province_iso_code STRING, " +
                        " province_3166_2_code STRING, " +
                        " order_id STRING, " +
                        " split_total_amount DOUBLE, " +
                        " create_time STRING, " +
                        " row_time AS TO_TIMESTAMP(create_time), " +
                        " WATERMARK FOR row_time AS row_time - INTERVAL '3' SECOND " +
                        ") WITH (" + KafkaUtils.getConnectInfo(orderWideTopic, groupId) + ")");
        // 5. 10-second tumbling window aggregation grouped by province.
        Table provinceStatsTable = tableEnv.sqlQuery("" +
                " select " +
                " DATE_FORMAT(window_start, 'yyyy-MM-dd HH:mm:ss') as stt, " +
                " DATE_FORMAT(window_end,   'yyyy-MM-dd HH:mm:ss') as edt, " +
                " province_id, " +
                " province_name, " +
                " province_area_code as area_code, " +
                " province_iso_code as iso_code, " +
                " province_3166_2_code as iso_3166_2, " +
                " count(distinct order_id) as order_count, " +
                " sum(split_total_amount) as order_amount, " +
                " UNIX_TIMESTAMP() * 1000 as ts " +
                " from TABLE (TUMBLE(TABLE ORDER_WIDE, DESCRIPTOR(row_time), INTERVAL '10' SECOND))" +
                " group by window_start, window_end, province_id, province_name, province_area_code, province_iso_code, province_3166_2_code");
        // 6. Convert the result to an append-only stream and write it to ClickHouse.
        //    The logging map is chained INTO the sink path (previously it was a
        //    dangling dead-end branch whose output was discarded).
        DataStream<ProvinceStats> provinceStatsDs = tableEnv.toAppendStream(provinceStatsTable, ProvinceStats.class);
        provinceStatsDs
                .map(s -> {
                    log.info("地区主题数据：{}", s);
                    return s;
                })
                // Identity lambda loses generic type info to erasure; declare it.
                .returns(ProvinceStats.class)
                .addSink(ClickhouseUtils.operateClickhouseJdbcSink("insert into  province_stats_2021 values(?,?,?,?,?,?,?,?,?,?)"));
        // 7. Trigger execution with an explicit job name for the Flink UI.
        env.execute("ProvinceStatsApp");
    }
}
