package com.dada.cn.join

import org.apache.flink.contrib.streaming.state.RocksDBStateBackend
import org.apache.flink.streaming.api.CheckpointingMode
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.table.api.EnvironmentSettings
import org.apache.flink.table.api.bridge.scala.StreamTableEnvironment

import java.time.ZoneId

/**
 * Deduplication example: keeps the FIRST row per order_id from a Debezium
 * changelog topic using ROW_NUMBER() OVER (PARTITION BY ... ORDER BY ...).
 *
 * NOTE(review): the header previously described a "Regular Join (Inner Join)"
 * demo; the sample output trace below appears to be leftover from that join
 * experiment and does not match the deduplication query in this file.
 * Created by xuwei
 *
 *
6> +I[2, 2, null] insert
6> -D[2, 2, null] retract
6> +I[2, 2, 22]   insert

6> +I[2, 2, 22]   produce
6> +I[2, 2, 22]   produce

6> -D[2, 2, null] delete right table
6> -D[2, 2, null]
6> -D[2, 2, null]
6> +I[2, 2, null]
6> +I[2, 2, null]
6> +I[2, 2, null]

6> -D[2, 2, 22]   delete main table

 */
object OverAgg {

  /**
   * Entry point. Builds a Flink streaming job that:
   *   1. configures a RocksDB state backend with incremental checkpoints,
   *   2. registers two Kafka changelog (debezium-json) tables,
   *   3. deduplicates `user_order` by `order_id` (first row wins, ordered by
   *      processing-time `row_time`) and prints the result.
   *
   * NOTE(review): `payment_flow` is registered but never referenced by the
   * query below — presumably leftover from an earlier join demo; confirm
   * whether it can be dropped.
   */
  def main(args: Array[String]): Unit = {

    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)

    // RocksDB state backend: checkpoints go to the file URI below, local
    // working state lives in one or more local directories.
    // The second constructor arg enables incremental checkpoints.
    val rocksDbLocalDirs = Array("/opt/soft/flink/rocksdb")
    val stateBackend: RocksDBStateBackend =
      new RocksDBStateBackend("file:/opt/soft/flink/checkpoint", true)
    stateBackend.setDbStoragePaths(rocksDbLocalDirs: _*)
    env.setStateBackend(stateBackend)

    // Checkpoint every 30 seconds (30000 ms) with exactly-once semantics.
    env.enableCheckpointing(30000)
    env.getCheckpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE)

    // Table environment layered on the streaming environment.
    val streamSettings = EnvironmentSettings
      .newInstance()
      .inStreamingMode()
      .build()
    val tableEnv = StreamTableEnvironment.create(env, streamSettings)

    // Use China Standard Time for time-zone-sensitive functions.
    tableEnv.getConfig.setLocalTimeZone(ZoneId.of("Asia/Shanghai"))

    // Low-level key-value option: do not ask the planner to guard against
    // duplicated CDC events from the source.
    tableEnv.getConfig.getConfiguration
      .setString("table.exec.source.cdc-events-duplicate", "false")

    // Order table backed by a Kafka debezium-json changelog topic.
    // `row_time` is a computed processing-time column used for dedup ordering.
    val userOrderDdl =
      """
        |CREATE TABLE user_order(
        |  order_id BIGINT,
        |  ts BIGINT,
        |  row_time AS cast(CURRENT_TIMESTAMP as timestamp(3)),
        |  primary key(order_id)  NOT ENFORCED
        |  -- 注意：d_timestamp的值可以从原始数据中取，原始数据中没有的话也可以从Kafka的元数据中取
        |  -- d_timestamp TIMESTAMP_LTZ(3) METADATA FROM 'timestamp'
        |)WITH(
        |  'connector' = 'kafka',
        |  'topic' = 'user_order',
        |  'properties.bootstrap.servers' = 'localhost:9092',
        |  'properties.group.id' = 'gid-sql-order1',
        |  -- 为了便于演示，在这使用latest-offset/earlist-offset，每次启动都使用最新的数据
        |  'scan.startup.mode' = 'earliest-offset',
        |  'format' = 'debezium-json'
        |)
        |""".stripMargin
    tableEnv.executeSql(userOrderDdl)

    // Payment table (same Kafka/debezium-json setup); currently unused below.
    val paymentFlowDdl =
      """
        |CREATE TABLE payment_flow(
        |  order_id BIGINT,
        |  pay_money BIGINT,
        |  primary key(order_id)  NOT ENFORCED
        |)WITH(
        |  'connector' = 'kafka',
        |  'topic' = 'payment_flow',
        |  'properties.bootstrap.servers' = 'localhost:9092',
        |  'properties.group.id' = 'gid-sql-payment1',
        |  -- 为了便于演示，在这使用latest-offset，每次启动都使用最新的数据
        |  'scan.startup.mode' = 'earliest-offset',
        |  'format' = 'debezium-json'
        |)
        |""".stripMargin
    tableEnv.executeSql(paymentFlowDdl)

    // Sink table: print connector, so results show up on stdout.
    val sinkDdl =
      """
        |CREATE TABLE order_payment(
        |  order_id  BIGINT,
        |  ts        BIGINT
        |)WITH(
        |  'connector' = 'print'
        |)
        |""".stripMargin
    tableEnv.executeSql(sinkDdl)

    // Deduplicate user_order: ROW_NUMBER per order_id ordered by row_time,
    // keep only ranking = 1 (the first row seen for each key).
    val dedupInsertSql =
      """
        | insert into order_payment
        | select order_id,ts
        | from
        | (select order_id,ts,
        | ROW_NUMBER() over (PARTITION BY order_id ORDER BY row_time) AS ranking
        | from user_order) t
        | where ranking = 1
      """.stripMargin

    tableEnv.executeSql(dedupInsertSql)

  }

}
