package com.mjf.app.dws;

import com.mjf.app.dwd.BaseDbApp;
import com.mjf.bean.ProvinceStatsWithSql;
import com.mjf.utils.MyKafkaUtil;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Province-theme wide-table aggregation (DWS layer) implemented with Flink SQL.
 * <p>
 * Known issue: the job runs without errors but produced no output in testing; the
 * symptom pointed at the TUMBLE window. NOTE(review): with event-time windows this
 * is typically the watermark failing to advance (e.g. an idle Kafka partition or no
 * fresh events past the window end) — confirm against the input topic before blaming
 * TUMBLE itself.
 * <p>
 * Flink SQL is hard to debug for this pipeline; a DataStream-API rewrite exists in
 * {@link ProvinceStatsApp} and is the recommended variant.
 * <p>
 * Data flow: web/app -> nginx -> springboot -> MySQL -> flinkApp -> kafka(ods)
 * -> flinkApp -> kafka(dwd)/HBase(dim) -> flinkApp(redis) -> Kafka(dwm)
 * -> flinkApp -> clickhouse
 * Programs: mockDb(web/nginx/springboot) -> MySQL -> FlinkCDC(flinkApp/kafka(ods))
 * -> BaseDbApp(flinkApp/kafka(dwd)/HBase(dim)) -> OrderWideApp(flinkApp/Kafka(dwm))
 * -> ProvinceStatsSqlApp(flinkApp/clickhouse(dws))
 * Required services: hdfs zookeeper kafka hbase phoenix(bin/sqlline.py) redis clickhouse
 */
public class ProvinceStatsSqlApp {
    public static void main(String[] args) throws Exception {

        // 1. Set up the execution environment. Parallelism is 1 so a single
        //    consumer reads every Kafka partition (relevant to watermark progress).
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // 1.1 Checkpointing & state backend — intentionally disabled for local testing.
        //env.setStateBackend(new FsStateBackend("hdfs://hadoop102:8020/gmall-flink-210325/ck"));
        //env.enableCheckpointing(5000L);
        //env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        //env.getCheckpointConfig().setCheckpointTimeout(10000L);
        //env.getCheckpointConfig().setMaxConcurrentCheckpoints(2);
        //env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3000);

        //env.setRestartStrategy(RestartStrategies.fixedDelayRestart());

        // 2. Create the source table via DDL; derive the event time from create_time
        //    and declare a 1-second bounded-out-of-orderness watermark on it.
        String groupId = "ProvinceStatsSqlApp";
        String orderWideTopic = "dwm_order_wide";
        tableEnv.executeSql(
                "CREATE TABLE order_wide ( " +
                        "  `province_id` BIGINT, " +
                        "  `province_name` STRING, " +
                        "  `province_area_code` STRING, " +
                        "  `province_iso_code` STRING, " +
                        "  `province_3166_2_code` STRING, " +
                        "  `order_id` BIGINT, " +
                        // FIX: bare DECIMAL means DECIMAL(10,0) in Flink SQL and would
                        // truncate the fractional part of the amount; give it an explicit
                        // precision/scale suitable for money.
                        "  `split_total_amount` DECIMAL(16, 2), " +
                        "  `create_time` STRING, " +
                        "  `rt` as TO_TIMESTAMP(create_time), " +
                        "  WATERMARK FOR rt AS rt - INTERVAL '1' SECOND ) with(" +
                        MyKafkaUtil.getKafkaDDL(orderWideTopic, groupId) + ")"
        );

        // 3. Query: group by province dimensions, tumble over 10-second event-time
        //    windows, and aggregate order count and amount. ts is the processing-time
        //    emission timestamp in milliseconds.
        Table table = tableEnv.sqlQuery(
                "select " +
                        "    DATE_FORMAT(TUMBLE_START(rt, INTERVAL '10' SECOND), 'yyyy-MM-dd HH:mm:ss') stt, " +
                        "    DATE_FORMAT(TUMBLE_END(rt, INTERVAL '10' SECOND), 'yyyy-MM-dd HH:mm:ss') edt, " +
                        "    province_id, " +
                        "    province_name, " +
                        "    province_area_code, " +
                        "    province_iso_code, " +
                        "    province_3166_2_code, " +
                        "    count(distinct order_id) order_count, " +
                        "    sum(split_total_amount) order_amount, " +
                        "    UNIX_TIMESTAMP()*1000 ts " +
                        "from " +
                        "    order_wide " +
                        "group by " +
                        "    province_id, " +
                        "    province_name, " +
                        "    province_area_code, " +
                        "    province_iso_code, " +
                        "    province_3166_2_code, " +
                        "    TUMBLE(rt, INTERVAL '10' SECOND)"
        );

        // 4. Convert the dynamic table to an append-only stream (a tumbling-window
        //    aggregation emits each window result exactly once, so append mode is valid).
        DataStream<ProvinceStatsWithSql> provinceStatsDataStream = tableEnv.toAppendStream(table, ProvinceStatsWithSql.class);

        // 5. Print for debugging; the ClickHouse sink is disabled while the
        //    no-output issue above is being investigated.
        provinceStatsDataStream.print();
/*        provinceStatsDataStream.addSink(ClickHouseUtil.getSink(
                "insert into province_stats values(?,?,?,?,?,?,?,?,?,?)"
        ));*/

        // 6. Launch the job.
        env.execute("ProvinceStatsSqlApp");

    }
}
