package com.shujia.dx

import org.apache.flink.contrib.streaming.state.RocksDBStateBackend
import org.apache.flink.runtime.state.StateBackend
import org.apache.flink.runtime.state.filesystem.FsStateBackend
import org.apache.flink.streaming.api.CheckpointingMode
import org.apache.flink.streaming.api.environment.CheckpointConfig.ExternalizedCheckpointCleanup
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.table.api.EnvironmentSettings
import org.apache.flink.table.api.bridge.scala.StreamTableEnvironment

/**
 * Flink SQL job: counts distinct tourists (by mdn) per city from a Kafka
 * stream of telecom records and upserts the result into a MySQL table.
 *
 * Submit with:
 * flink run -m yarn-cluster  -yjm 1024m -ytm 1024m -c com.shujia.dx.Demo03CityTouristCntOnSQL Flink-1.0.jar
 */
object Demo03CityTouristCntOnSQL {
  def main(args: Array[String]): Unit = {
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // Blink planner in streaming mode for SQL parsing/execution.
    val settings: EnvironmentSettings = EnvironmentSettings
      .newInstance()
      .useBlinkPlanner()
      .inStreamingMode()
      .build()

    // Table environment bridging the DataStream env and Table/SQL API.
    val tableEnv: StreamTableEnvironment = StreamTableEnvironment.create(env, settings)

    // Trigger a checkpoint every 10 seconds (10000 ms).
    env.enableCheckpointing(10000)

    val checkpointConfig = env.getCheckpointConfig
    // Exactly-once checkpointing semantics (this is also the default mode).
    checkpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE)
    // At least 500 ms must elapse between the end of one checkpoint and the start of the next.
    checkpointConfig.setMinPauseBetweenCheckpoints(500)
    // Abort a checkpoint if it takes longer than 60 seconds.
    checkpointConfig.setCheckpointTimeout(60000)
    // Never run more than one checkpoint at a time.
    checkpointConfig.setMaxConcurrentCheckpoints(1)
    // Keep externalized checkpoint state on HDFS even when the job is cancelled,
    // so the job can later be restored from it.
    checkpointConfig.enableExternalizedCheckpoints(ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION)

    // RocksDB state backend with incremental checkpoints, persisted to HDFS.
    val stateBackend: StateBackend = new RocksDBStateBackend("hdfs://master:9000/flink/wc/checkpoint", true)
    env.setStateBackend(stateBackend)

    // Source table: CSV-encoded telecom records read from the Kafka topic
    // `dianxin`, starting from the earliest offset.
    tableEnv.executeSql(
      """
        |CREATE TABLE dianxin (
        | mdn STRING,
        | grid_id STRING,
        | city_id STRING,
        | county_id STRING,
        | duration STRING,
        | start_time STRING,
        | end_time STRING,
        | dt STRING
        |) WITH (
        | 'connector' = 'kafka',
        | 'topic' = 'dianxin',
        | 'properties.bootstrap.servers' = 'master:9092,node1:9092,node2:9092',
        | 'properties.group.id' = 'test1',
        | 'format' = 'csv',
        | 'csv.field-delimiter' = ',',
        | 'scan.startup.mode' = 'earliest-offset'
        |)
        |""".stripMargin)

    // Sink table: JDBC upsert into MySQL keyed on city_id, so each city's
    // count is continuously updated rather than appended.
    tableEnv.executeSql(
      """
        |CREATE TABLE city_cnt (
        |  city_id STRING,
        |  cnt BIGINT,
        |  PRIMARY KEY (city_id) NOT ENFORCED
        |) WITH (
        |   'connector' = 'jdbc',
        |   'url' = 'jdbc:mysql://master:3306/crm',
        |   'table-name' = 'city_cnt',
        |   'username' = 'root',
        |   'password' = '123456'
        |)
        |""".stripMargin)

    // Continuous query: distinct tourist count (deduplicated by mdn) per city.
    tableEnv.executeSql(
      """
        |insert into city_cnt
        |select city_id,count(distinct mdn) from dianxin group by city_id
        |""".stripMargin)
  }
}
