package com.atguigu.realtime.app.dws;

import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * @Author lizhenchao@atguigu.cn
 * @Date 2021/2/15 21:59
 */
/**
 * DWS-layer job: aggregates the order-wide stream into per-province statistics.
 *
 * <p>Reads the {@code dwm_order_wide} topic from Kafka, computes 10-second tumbling-window
 * order amount / distinct order count per province, and writes the result to the
 * {@code province_stats_2021} table in ClickHouse.
 */
public class DWSProvinceStatsSqlApp {
    public static void main(String[] args) {
        // Run HDFS (checkpoint storage) operations as the "atguigu" user.
        System.setProperty("HADOOP_USER_NAME", "atguigu");
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        // Exactly-once checkpointing every 5s; a checkpoint may take at most 60s.
        env.enableCheckpointing(5000, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setCheckpointTimeout(60000);
        // Keep externalized checkpoints on cancellation so the job can be restored manually.
        env
            .getCheckpointConfig()
            .enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        env.setStateBackend(new FsStateBackend("hdfs://hadoop162:8020/gmall2021/flink/checkpoint2"));
        
        final StreamTableEnvironment tenv = StreamTableEnvironment.create(env);
        // 1. Register the source table: read order-wide records from Kafka.
        //    Event time comes from create_time with a 5s out-of-orderness watermark.
        tenv.executeSql("CREATE TABLE order_wide (" +
                            "   province_id BIGINT, " +
                            "   province_name STRING," +
                            "   province_area_code STRING," +
                            "   province_iso_code STRING," +
                            "   province_3166_2_code STRING," +
                            "   order_id STRING, " +
                            "   split_total_amount DOUBLE," +
                            "   create_time STRING, " +
                            "   rowtime AS TO_TIMESTAMP(create_time)," +
                            "   WATERMARK FOR  rowtime  AS rowtime - interval '5' second )" +
                            "WITH (" +
                            "   'connector' = 'kafka'," +
                            "   'topic' = 'dwm_order_wide'," +
                            // Fixed: first broker previously pointed at port 9029 (typo); all brokers listen on 9092.
                            "   'properties.bootstrap.servers' = 'hadoop162:9092,hadoop163:9092,hadoop164:9092'," +
                            "   'properties.group.id' = 'DWSProvinceStatsSqlApp'," +
                            "   'scan.startup.mode' = 'latest-offset'," +
                            "   'format' = 'json'" +
                            ")");
        
        // 2. Register the sink table: write aggregated stats to ClickHouse.
        //    (window start, window end, province_id) uniquely identifies a row.
        tenv.executeSql("create table province_stats_2021(" +
                            "   stt string," +
                            "   edt string," +
                            "   province_id bigint," +
                            "   province_name string," +
                            "   area_code string," +
                            "   iso_code string," +
                            "   iso_3166_2 string," +
                            "   order_amount decimal(20, 2)," +
                            "   order_count bigint, " +
                            "   ts bigint, " +
                            "   PRIMARY KEY (stt, edt, province_id) NOT ENFORCED" +
                            ")with(" +
                            "   'connector' = 'clickhouse', " +
                            "   'url' = 'clickhouse://hadoop162:8123', " +
                            "   'database-name' = 'gmall2021', " +
                            "   'table-name' = 'province_stats_2021'," +
                            "   'sink.batch-size' = '100', " +
                            "   'sink.flush-interval' = '1000', " +
                            "   'sink.max-retries' = '3' " +
                            ")");
        // 3. Query the source table and write into the sink table.
        //    10-second tumbling windows, grouped by province; ts is the emission time in ms.
        //    executeSql on an INSERT submits the job, so no env.execute() is needed.
        tenv.executeSql("insert into province_stats_2021 " +
                            "select " +
                            "   DATE_FORMAT(TUMBLE_START(rowtime, INTERVAL '10' SECOND ),'yyyy-MM-dd HH:mm:ss') stt, " +
                            "   DATE_FORMAT(TUMBLE_END(rowtime, INTERVAL '10' SECOND ),'yyyy-MM-dd HH:mm:ss') edt , " +
                            "   province_id," +
                            "   province_name," +
                            "   province_area_code area_code," +
                            "   province_iso_code iso_code," +
                            "   province_3166_2_code iso_3166_2 ," +
                            "   sum(split_total_amount) order_amount," +
                            "   COUNT(DISTINCT order_id) order_count, " +
                            "   UNIX_TIMESTAMP()*1000 ts " +
                            "from  order_wide " +
                            "group by  " +
                            "   TUMBLE(rowtime, INTERVAL '10' SECOND ), " +
                            "   province_id," +
                            "   province_name," +
                            "   province_area_code," +
                            "   province_iso_code," +
                            "   province_3166_2_code ");
        
    }
}
