package com.shujia.common

import org.apache.flink.configuration.Configuration
import org.apache.flink.runtime.state.StateBackend
import org.apache.flink.runtime.state.filesystem.FsStateBackend
import org.apache.flink.streaming.api.CheckpointingMode
import org.apache.flink.streaming.api.environment.CheckpointConfig.ExternalizedCheckpointCleanup
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.table.api.EnvironmentSettings
import org.apache.flink.table.api.bridge.scala.StreamTableEnvironment
import org.apache.flink.table.catalog.hive.HiveCatalog
import org.apache.flink.table.module.hive.HiveModule

object FlinkTool {

  /**
    * Builds a Flink SQL streaming environment with exactly-once checkpointing to
    * HDFS and a Hive catalog/module registered so Hive tables and UDFs are usable.
    *
    * All previously hard-coded values are now parameters with the old values as
    * defaults, so existing callers are unaffected.
    *
    * @param checkpointInterval checkpoint trigger interval in milliseconds
    * @param checkpointDir      HDFS (or other FS) URI used by the FsStateBackend
    * @param catalogName        name under which the Hive catalog is registered
    * @param defaultDatabase    default database inside the Hive catalog
    * @param hiveConfDir        directory containing hive-site.xml on the cluster
    * @param hiveVersion        Hive version used to load built-in Hive functions
    * @return a [[StreamTableEnvironment]] with the Hive catalog set as current
    */
  def getFlinkTableEnv(
                        checkpointInterval: Long = 10000,
                        checkpointDir: String = "hdfs://master:9000/flink/gma",
                        catalogName: String = "myhive",
                        defaultDatabase: String = "default",
                        hiveConfDir: String = "/usr/local/soft/hive-1.2.1/conf",
                        hiveVersion: String = "1.2.1"
                      ): StreamTableEnvironment = {
    // Create the Flink streaming execution environment
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // Table environment settings: Blink planner, streaming mode
    val bsSettings: EnvironmentSettings = EnvironmentSettings.newInstance()
      .useBlinkPlanner()
      .inStreamingMode()
      .build()

    // Create the Flink table environment on top of the stream environment
    val bsTableEnv: StreamTableEnvironment = StreamTableEnvironment.create(env, bsSettings)

    // Allow per-query dynamic table options (the /*+ OPTIONS(...) */ SQL hint)
    val configuration = new Configuration()
    configuration.setBoolean("table.dynamic-table-options.enabled", true)
    bsTableEnv.getConfig.addConfiguration(configuration)

    /**
      * Checkpoint configuration
      */

    // Trigger a checkpoint every `checkpointInterval` milliseconds
    env.enableCheckpointing(checkpointInterval)

    // Exactly-once processing guarantees (this is also the default mode)
    env.getCheckpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE)

    // Require at least 500 ms between the end of one checkpoint and the start of the next
    env.getCheckpointConfig.setMinPauseBetweenCheckpoints(500)

    // A checkpoint that takes longer than one minute is discarded
    env.getCheckpointConfig.setCheckpointTimeout(60000)

    // At most one checkpoint in flight at a time
    env.getCheckpointConfig.setMaxConcurrentCheckpoints(1)

    // Keep checkpoints when the job is cancelled; they must then be cleaned up manually
    env.getCheckpointConfig.enableExternalizedCheckpoints(ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION)

    // Persist checkpoint state to the filesystem (HDFS) state backend
    val stateBackend: StateBackend = new FsStateBackend(checkpointDir)
    env.setStateBackend(stateBackend)

    /**
      * Hive metastore integration
      */
    val hive = new HiveCatalog(catalogName, defaultDatabase, hiveConfDir)

    // Register the Hive catalog and make it the current one
    // (using the parameter everywhere avoids the old repeated "myhive" literals)
    bsTableEnv.registerCatalog(catalogName, hive)
    bsTableEnv.useCatalog(catalogName)

    // Load Hive's built-in functions so they can be used in Flink SQL
    bsTableEnv.loadModule(catalogName, new HiveModule(hiveVersion))

    // Return the fully configured table environment
    bsTableEnv
  }
}
