package com.yanggu.bigdata.realtime.app.dws

import com.yanggu.bigdata.realtime.bean.ProvinceStats
import com.yanggu.bigdata.realtime.common.GmallConfig.KAFKA_BROKER_LIST
import com.yanggu.bigdata.realtime.utils.{ClickHouseUtil, KafkaUtil}
import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala._

/**
 * Province-level wide-table aggregation implemented with Flink SQL:
 * reads the order-wide stream from Kafka, aggregates per province over
 * 10-second tumbling event-time windows, and writes the result to ClickHouse.
 */
object ProvinceStatsSqlApp {

  def main(args: Array[String]): Unit = {
    // 1. Streaming execution environment plus its table environment.
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)
    val streamTableEnv: StreamTableEnvironment = StreamTableEnvironment.create(
      env,
      EnvironmentSettings.newInstance().inStreamingMode().build()
    )

    // 2.1 Kafka source coordinates for the order-wide DWM topic.
    val consumerGroup = "province_stats"
    val sourceTopic = "dwm_order_wide"

    // 2.2 Register the Kafka-backed source table. row_time is derived from
    // create_time and declared as the event-time watermark (zero lateness slack).
    streamTableEnv.executeSql(
      s"""
         |CREATE TABLE order_wide(
         | order_id STRING,
         | province_id BIGINT,
         | province_name STRING,
         | province_area_code STRING,
         | province_iso_code STRING,
         | province_3166_2_code STRING,
         | total_amount DOUBLE,
         | create_time STRING,
         | row_time AS TO_TIMESTAMP(create_time),
         | WATERMARK FOR row_time AS row_time
         |) ${KafkaUtil.getKafkaDDL(KAFKA_BROKER_LIST, sourceTopic, consumerGroup)}
         |""".stripMargin)

    // 3. Group by province within 10-second tumbling windows; count distinct
    //    orders and sum the order amount. ts is a processing-time stamp in ms.
    val provinceAggTable: Table = streamTableEnv.sqlQuery(
      s"""
         |SELECT
         |  DATE_FORMAT(TUMBLE_START(row_time, INTERVAL '10' SECOND ), 'yyyy-MM-dd HH:mm:ss') AS stt,
         |  DATE_FORMAT(TUMBLE_END(row_time, INTERVAL '10' SECOND ), 'yyyy-MM-dd HH:mm:ss') AS edt,
         |  province_id,
         |  province_name,
         |  province_area_code,
         |  province_iso_code,
         |  province_3166_2_code,
         |  COUNT(DISTINCT order_id) AS order_count,
         |  SUM(total_amount) AS order_amount,
         |  UNIX_TIMESTAMP() * 1000 ts
         |FROM
         |  order_wide
         |GROUP BY
         |  TUMBLE(row_time, INTERVAL '10' SECOND), province_id, province_name, province_area_code, province_iso_code, province_3166_2_code
         |""".stripMargin)

    // 4.1 Convert the append-only window result into a typed DataStream.
    val statsStream: DataStream[ProvinceStats] =
      streamTableEnv.toAppendStream[ProvinceStats](provinceAggTable)
    statsStream.print("resultDataStream>>>>>")

    // 4.2 Persist the windowed aggregates into the ClickHouse province_stats table.
    statsStream.addSink(
      ClickHouseUtil.getSinkFunction[ProvinceStats]("province_stats", classOf[ProvinceStats]))

    // 5. Launch the job.
    env.execute("ProvinceStatsSqlApp Job")

  }

}
