package com.app.dws;

import com.bean.ProductStats;
import com.bean.ProvinceStats;
import com.common.GlobalConfig;
import com.utils.ClickHouseUtil;
import com.utils.MyKafkaUtil;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * @Description: TODO QQ1667847363
 * @author: xiao kun tai
 * @date:2022/1/10 22:26
 */

//TODO: Data flow:  web/app -> Nginx -> Springboot -> Kafka(ods) -> FlinkApp -> Kafka/HBase(dwd-dim) -> FlinkApp(redis) -> Kafka(DWM) -> FlinkApp -> ClickHouse
//TODO: Programs:   mockLog -> Mysql -> FlinkCDC -> Kafka(ZK) -> BaseDbApp -> Kafka/Phoenix(ZK/hdfs/hbase) -> OrderWideApp(Redis) -> Kafka(ZK) -> ProvinceStatsSqlApp -> ClickHouse
public class ProvinceStatsSqlApp {

    /**
     * Reads order-wide records from the Kafka topic {@code dwm_order_wide},
     * aggregates order count and order amount per province over 10-second
     * tumbling event-time windows, and writes the results to the ClickHouse
     * table {@code province_stats_2021}.
     */
    public static void main(String[] args) throws Exception {
        //TODO:1. Create the execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // In production, keep parallelism equal to the number of Kafka partitions.
        env.setParallelism(1);

        EnvironmentSettings settings = EnvironmentSettings
                .newInstance()
                .inStreamingMode()
                .build();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env, settings);

        //TODO: Flink CDC saves the binlog read position as checkpoint state. To resume
        // from where it left off, the job must be started from a Checkpoint or Savepoint.
        /*// Enable checkpointing and pick a state backend (memory / fs / rocksdb)
        env.setStateBackend(new FsStateBackend("hdfs://192.168.88.109:9820/gmall-flink/ck"));
        // Take a checkpoint every 5 seconds
        env.enableCheckpointing(5000L);
        // Checkpoint consistency semantics
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setCheckpointTimeout(10000L);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3000);
        // Retain the last checkpoint when the job is cancelled
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        // Restart strategy used when recovering from a checkpoint
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 2000L));
        // User name for HDFS access
        System.setProperty("HADOOP_USER_NAME", "root");*/


        //TODO:2. Create the source table via DDL; derive the event-time column and generate a watermark
        String groupId = "province_stats" + GlobalConfig.NUMBER;
        String orderWideTopic = "dwm_order_wide";

        // NOTE(review): a bare DECIMAL is DECIMAL(10, 0) in Flink SQL, which drops the
        // fractional part of amounts — consider DECIMAL(16, 2); confirm with the schema owner.
        tableEnv.executeSql("CREATE TABLE order_wide (" +
                "`province_id` BIGINT, " +
                // Fixed: `province_name` was declared twice in the original DDL,
                // which makes executeSql fail with a duplicate-column error.
                "`province_name` STRING, " +
                "`province_area_code` STRING," +
                "`province_iso_code` STRING," +
                "`province_3166_2_code` STRING," +
                "`order_id` STRING, " +
                "`split_total_amount` DECIMAL," +
                "`create_time` STRING," +
                "`rt` AS TO_TIMESTAMP(create_time) ," +
                "WATERMARK FOR rt AS rt - INTERVAL '1' SECOND)" +
                " WITH (" + MyKafkaUtil.getKafkaDDL(orderWideTopic, groupId) + ")");


        //TODO:3. Query the data: group by province, apply a 10s tumbling window, aggregate
        Table table =
                tableEnv.sqlQuery("select " +
                        "DATE_FORMAT(TUMBLE_START(rt, INTERVAL '10' SECOND ),'yyyy-MM-dd HH:mm:ss') stt, " +
                        "DATE_FORMAT(TUMBLE_END(rt, INTERVAL '10' SECOND ),'yyyy-MM-dd HH:mm:ss') edt , " +
                        "province_id," +
                        "province_name," +
                        "province_area_code," +
                        "province_iso_code," +
                        "province_3166_2_code," +
                        "count(distinct order_id) order_count," +
                        "sum(split_total_amount) order_amount," +
                        "UNIX_TIMESTAMP()*1000 ts " +
                        " from order_wide group by TUMBLE(rt, INTERVAL '10' SECOND )," +
                        " province_id, province_name, province_area_code, province_iso_code, province_3166_2_code");


        //TODO:4. Convert the dynamic table to an append-only stream
        // Renamed from productStatsDataStream: the element type is ProvinceStats,
        // not ProductStats, and the misleading name invited copy-paste bugs.
        DataStream<ProvinceStats> provinceStatsDataStream =
                tableEnv.toAppendStream(table, ProvinceStats.class);


        //TODO:5. Print the data and write it to ClickHouse
        System.out.println("任务开始>>>>>>>>>");
        provinceStatsDataStream.print();
        provinceStatsDataStream
                .addSink(ClickHouseUtil.<ProvinceStats>getSink("insert into province_stats_2021 values(?,?,?,?,?,?,?,?,?,?)"));


        //TODO:6. Launch the job
        env.execute("ProvinceStatsSqlApp");
    }
}
