package com.atguigu.gmall.realtime.app.dws;

import com.atguigu.gmall.realtime.bean.ProvinceStats;
import com.atguigu.gmall.realtime.util.ClickHouseUtil;
import com.atguigu.gmall.realtime.util.MyKafkaUtil;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * @author: xu
 * @desc: 使用FlinkSQL对地区主题统计
 * 程  序：MockDB -> MySQL -> FlinkCDC -> Kafka(ZK) -> BaseDbApp -> Kafka/Phoenix(zk/hdfs/hbase) -> OrderWideApp(Redis) -> Kafka -> ProvinceStatsSqlApp -> ClickHouse
 */
public class ProvinceStatsSqlApp {
    public static void main(String[] args) throws Exception {
        // TODO 1. Prepare the basic environment
        // 1.1 Create the local stream execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // 1.2 Set parallelism (1 for local testing so output ordering is easy to read)
        env.setParallelism(1);
        // 1.3 Checkpoint configuration (disabled for local testing; enable for production)
        // env.enableCheckpointing(5000, CheckpointingMode.EXACTLY_ONCE);
        // env.getCheckpointConfig().setCheckpointTimeout(60000);
        // env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3,3000L));
        // env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        // env.setStateBackend(new FsStateBackend("hdfs://node1:8020/gmall/checkpoint/ProvinceStatsSqlApp"))
        // System.setProperty("HADOOP_USER_NAME", "root");
        // 1.4 Create the Table environment bridging the DataStream API
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // TODO 2. Create the source table via DDL, extracting the event timestamp
        //         and generating a watermark (3s out-of-orderness tolerance)
        String groupId = "province_stats";
        String orderWideTopic = "dwm_order_wide";

        // NOTE(fix): Flink SQL identifiers are case-sensitive. The table was
        // previously registered as ORDER_WIDE while the query below read from
        // order_wide, which fails at planning time with "Object 'order_wide'
        // not found". Both now use the same lowercase identifier.
        tableEnv.executeSql("CREATE TABLE order_wide (" +
                " province_id BIGINT," +
                " province_name STRING," +
                " province_area_code STRING," +
                " province_iso_code STRING," +
                " province_3166_2_code STRING," +
                " order_id STRING," +
                " split_total_amount DOUBLE," +
                " create_time STRING," +
                // Derive the event-time attribute from the string timestamp column
                " rowtime AS TO_TIMESTAMP(create_time) ," +
                " WATERMARK FOR rowtime AS rowtime - INTERVAL '3' SECOND " +
                " ) WITH (" + MyKafkaUtil.getKafkaDDL(orderWideTopic, groupId) + ")");

        // TODO 3. Group by province, open a 10-second tumbling event-time window,
        //         and aggregate order count and order amount per window
        Table provinceStateTable = tableEnv.sqlQuery("select" +
                "  DATE_FORMAT(TUMBLE_START(rowtime, INTERVAL '10' SECOND ),'yyyy-MM-dd HH:mm:ss') stt," +
                "  DATE_FORMAT(TUMBLE_END(rowtime, INTERVAL '10' SECOND ),'yyyy-MM-dd HH:mm:ss') edt," +
                "  province_id," +
                "  province_name," +
                "  province_area_code area_code," +
                "  province_iso_code iso_code ," +
                "  province_3166_2_code iso_3166_2," +
                "  COUNT(DISTINCT order_id) order_count," +
                "  SUM(split_total_amount) order_amount," +
                "  UNIX_TIMESTAMP() * 1000 ts" +
                " FROM order_wide" +
                " GROUP BY" +
                "  TUMBLE(rowtime, INTERVAL '10' SECOND )," +
                "  province_id," +
                "  province_name," +
                "  province_area_code," +
                "  province_iso_code," +
                "  province_3166_2_code");

        // TODO 4. Convert the dynamic table to a DataStream. A tumbling-window
        //         aggregation emits only final results, so an append stream is safe.
        DataStream<ProvinceStats> provinceStatsStream = tableEnv.toAppendStream(provinceStateTable, ProvinceStats.class);
        // DataStream<Tuple2<Boolean, ProvinceStats>> provinceStatsDS = tableEnv.toRetractStream(provinceStateTable, ProvinceStats.class);

        provinceStatsStream.print(">>>>");

        // TODO 5. Sink the windowed aggregates to ClickHouse (10 placeholders
        //         match the 10 projected columns: stt, edt, province_id,
        //         province_name, area_code, iso_code, iso_3166_2, order_count,
        //         order_amount, ts)
        provinceStatsStream.addSink(
                ClickHouseUtil.getJdbcSink("insert into province_stats values(?,?,?,?,?,?,?,?,?,?)")
        );

        env.execute(ProvinceStatsSqlApp.class.getSimpleName());
    }
}