package com.chenxu.gmall.realtime.app.dws;

import com.chenxu.gmall.realtime.bean.ProvinceStats;
import com.chenxu.gmall.realtime.utils.ClickHouseUtil;
import com.chenxu.gmall.realtime.utils.MyKafkaUtil;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Date: 2021/07/19
 * Desc: 使用FlinkSQL对地区主题统计
 */
/**
 * Date: 2021/07/19
 * Desc: Province-level order statistics computed with Flink SQL.
 *
 * <p>Pipeline: Kafka topic {@code dwm_order_wide} (registered as a dynamic
 * table) → 10-second tumbling-window aggregation per province → append-only
 * {@code ProvinceStats} stream → JDBC sink into ClickHouse table
 * {@code province_stats_0709}.
 */
public class ProvinceStatsSqlApp {
    public static void main(String[] args) throws Exception {
        // TODO 1. Environment setup
        // 1.1 Streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // 1.2 Single parallelism keeps local console output ordered.
        env.setParallelism(1);
        /*
        // 1.3 Checkpoint settings (disabled for local testing; re-enable for production).
        env.enableCheckpointing(5000, CheckpointingMode.AT_LEAST_ONCE);
        env.getCheckpointConfig().setCheckpointTimeout(60000);
        StateBackend fsStateBackend = new FsStateBackend("hdfs://hadoop102:8020/gmall/flink/checkpoint/ProductStatsApp");
        env.setStateBackend(fsStateBackend);
        System.setProperty("HADOOP_USER_NAME","chenxu");
        */
        // 1.4 Table environment. Streaming mode with the Blink planner is the
        //     default, so this explicit configuration is optional but kept for clarity.
        EnvironmentSettings settings = EnvironmentSettings
            .newInstance()
            .inStreamingMode()
            .build();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env, settings);

        // TODO 2. Register the Kafka source as a dynamic table
        String groupId = "province_stats";
        String orderWideTopic = "dwm_order_wide";

        /*
        Example of a Kafka table DDL:
        CREATE TABLE KafkaTable (
          `user_id` BIGINT,
          `item_id` BIGINT,
          `behavior` STRING,
          `ts` TIMESTAMP(3) METADATA FROM 'timestamp'
        ) WITH (
          'connector' = 'kafka',
          'topic' = 'user_behavior',
          'properties.bootstrap.servers' = 'localhost:9092',
          'properties.group.id' = 'testGroup',
          'scan.startup.mode' = 'earliest-offset',
          'format' = 'csv'
        )
         */

        // rowtime AS TO_TIMESTAMP(create_time): computed event-time column.
        // TO_TIMESTAMP defaults to the 'yyyy-MM-dd HH:mm:ss' format, which matches
        // create_time here, so no explicit format string is needed.
        //
        // Watermark strategies for reference (this table uses the first one):
        //   * Strictly ascending:  WATERMARK FOR rowtime AS rowtime
        //     Emits a watermark equal to the max timestamp seen so far; rows with a
        //     larger timestamp are not late.
        //   * Ascending:           WATERMARK FOR rowtime AS rowtime - INTERVAL '0.001' SECOND
        //     Emits max timestamp minus 1 ms; rows with a timestamp >= the max are not late.
        //   * Bounded out-of-order: WATERMARK FOR rowtime AS rowtime - INTERVAL '5' SECOND
        //     Emits max timestamp minus the given delay (5 s of tolerated lateness).
        String orderWideDdl =
            "CREATE TABLE ORDER_WIDE (province_id BIGINT, "
                + "province_name STRING,province_area_code STRING"
                + ",province_iso_code STRING,province_3166_2_code STRING,order_id STRING, "
                + "split_total_amount DOUBLE,create_time STRING,rowtime AS TO_TIMESTAMP(create_time) ,"
                + "WATERMARK FOR  rowtime  AS rowtime)"
                + " WITH (" + MyKafkaUtil.getKafkaDDL(orderWideTopic, groupId) + ")";
        tableEnv.executeSql(orderWideDdl);

        // TODO 3. Windowed aggregation
        //   TUMBLE_START / TUMBLE_END      -> window start / end timestamps
        //   DATE_FORMAT                    -> render them as 'yyyy-MM-dd HH:mm:ss' strings
        //   COUNT(DISTINCT order_id)      -> number of orders in the window
        //   sum(split_total_amount)        -> total order amount in the window
        //   GROUP BY TUMBLE(rowtime, INTERVAL '10' SECOND)
        //                                  -> 10-second tumbling window on event time
        String aggregationSql =
            "select "
                + "DATE_FORMAT(TUMBLE_START(rowtime, INTERVAL '10' SECOND ),'yyyy-MM-dd HH:mm:ss') stt, "
                + "DATE_FORMAT(TUMBLE_END(rowtime, INTERVAL '10' SECOND ),'yyyy-MM-dd HH:mm:ss') edt , "
                + " province_id,province_name,province_area_code area_code,"
                + "province_iso_code iso_code ,province_3166_2_code iso_3166_2 ,"
                + "COUNT( DISTINCT  order_id) order_count, sum(split_total_amount) order_amount,"
                + "UNIX_TIMESTAMP()*1000 ts "
                + " from  ORDER_WIDE group by  TUMBLE(rowtime, INTERVAL '10' SECOND ),"
                + " province_id,province_name,province_area_code,province_iso_code,province_3166_2_code ";
        Table provinceStatsTable = tableEnv.sqlQuery(aggregationSql);

        // TODO 4. Convert the dynamic table to a data stream.
        // A tumbling-window group-by produces only inserts, so an append-only
        // stream is sufficient (no retract stream needed).
        DataStream<ProvinceStats> provinceStatsStream =
            tableEnv.toAppendStream(provinceStatsTable, ProvinceStats.class);

        provinceStatsStream.print(">>>>");

        // TODO 5. Persist the stream into ClickHouse.
        // Flink has no native ClickHouse connector here, so a generic JDBC sink is used.
        provinceStatsStream.addSink(
            ClickHouseUtil.getJdbcSink(
                "insert into  province_stats_0709  values(?,?,?,?,?,?,?,?,?,?)"
            )
        );

        // End-to-end test procedure:
        //   * Start ZK, Kafka, ClickHouse, Redis, HDFS, HBase, Maxwell
        //   * Run BaseDBApp
        //   * Run OrderWideApp
        //   * Run ProvinceStatsSqlApp
        //   * Run the jar under the rt_dblog directory and watch the console output
        //   * Check the data in the ClickHouse table province_stats_0709
        // NOTE: because of the mismatch between the window time and the mock-data
        // script time, the business-data script may need to be run twice.

        /*
        Sample of provinceStatsStream.print(">>>>") output:
        >>>>:2> ProvinceStats(stt=2021-07-19 21:59:00, edt=2021-07-19 21:59:10, province_id=9, province_name=安徽,
        area_code=340000, iso_code=CN-34, iso_3166_2=CN-AH, order_amount=69.000000000000000000,
        order_count=1, ts=1626703155000)
         */
        env.execute();
    }
}
