package cn.getech.data.development.utils

import org.apache.flink.api.common.RuntimeExecutionMode
import org.apache.flink.api.common.restartstrategy.RestartStrategies
import org.apache.flink.runtime.state.filesystem.FsStateBackend
import org.apache.flink.streaming.api.environment.CheckpointConfig
import org.apache.flink.streaming.api.CheckpointingMode
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.table.api.EnvironmentSettings
import org.apache.flink.table.api.bridge.scala.StreamTableEnvironment
import org.slf4j.{Logger, LoggerFactory}

object FlinkUtils {
  // Base HDFS directory under which each job's checkpoint directory is created.
  val path: String = ConfigurationManager.getProperty("hdfs.checkpoint.path")
  private val logger: Logger = LoggerFactory.getLogger(this.getClass)

  /**
   * Create a streaming environment for Flink DAG & SQL jobs, with the
   * checkpoint directory derived from the job name.
   *
   * @param jobName job name; appended to the configured checkpoint base path
   *                to form the per-job checkpoint directory
   * @return a configured [[StreamExecutionEnvironment]]
   */
  def env(jobName: String): StreamExecutionEnvironment = {
    createStreamEnv(s"$path/$jobName")
  }

  /**
   * Wrap a stream environment in a streaming [[StreamTableEnvironment]]
   * using the blink planner settings from [[createStreamSettings]].
   *
   * @param env the underlying stream execution environment
   * @return a streaming table environment bridged to `env`
   */
  def createStreamTableEnv(env: StreamExecutionEnvironment): StreamTableEnvironment = {
    StreamTableEnvironment.create(env, createStreamSettings)
  }

  /**
   * Create a streaming environment tuned for data-synchronisation jobs,
   * with the checkpoint directory derived from the job name.
   *
   * @param jobName job name; appended to the configured checkpoint base path
   * @return a configured [[StreamExecutionEnvironment]]
   */
  def syncEnv(jobName: String): StreamExecutionEnvironment = {
    createSyncStreamEnv(s"$path/$jobName")
  }

  /**
   * Build the base Flink streaming environment shared by all jobs:
   * streaming runtime mode, exactly-once checkpointing, at most one
   * concurrent checkpoint, fixed-delay restart (5 attempts, 5 s apart),
   * and externalized checkpoints retained on job cancellation.
   *
   * @return the base [[StreamExecutionEnvironment]] (checkpointing interval
   *         and state backend are configured by the callers)
   */
  def getEnv: StreamExecutionEnvironment = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    // For local debugging with the web UI, use instead:
    // val env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new org.apache.flink.configuration.Configuration())

    // Set the Hadoop user identity before any HDFS access (state backend).
    System.setProperty(ConfigurationManager.getProperty("hdfs.account"), ConfigurationManager.getProperty("hdfs.username"))
    env.setRuntimeMode(RuntimeExecutionMode.STREAMING)
    env.getCheckpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE)
    env.getCheckpointConfig.setMaxConcurrentCheckpoints(1)
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(5, org.apache.flink.api.common.time.Time.seconds(5)))
    // Keep the last checkpoint when the job is cancelled so it can be
    // resumed manually from that checkpoint.
    env.getCheckpointConfig.enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION)
    env
  }

  /**
   * Build the table-environment settings: blink planner, streaming mode.
   *
   * @return streaming-mode [[EnvironmentSettings]]
   */
  def createStreamSettings: EnvironmentSettings = {
    EnvironmentSettings.newInstance.useBlinkPlanner.inStreamingMode().build
  }

  /**
   * Environment for Flink DAG & SQL jobs: checkpoint every 30 s, at least
   * 30 s pause between checkpoints, 60 s checkpoint timeout, up to 3
   * tolerated checkpoint failures, HDFS state backend with async snapshots.
   *
   * @param hdfsPath HDFS directory for this job's checkpoints
   * @return a fully configured [[StreamExecutionEnvironment]]
   */
  def createStreamEnv(hdfsPath: String): StreamExecutionEnvironment = {
    logger.info("checkpoint path: {}", hdfsPath)
    val env: StreamExecutionEnvironment = getEnv
    env.enableCheckpointing(1000 * 30) // 30 s interval
    env.getCheckpointConfig.setMinPauseBetweenCheckpoints(30 * 1000) // 30 s
    env.getCheckpointConfig.setCheckpointTimeout(1000 * 60) // 1 min
    env.getCheckpointConfig.setTolerableCheckpointFailureNumber(3) // tolerate 3 failures
    env.setStateBackend(new FsStateBackend(hdfsPath, true)) // true = async snapshots
    env
  }

  /**
   * Environment for data-synchronisation jobs: checkpoint every 1 min,
   * at least 30 s pause between checkpoints, 2 min checkpoint timeout,
   * up to 3 tolerated checkpoint failures, HDFS state backend with async
   * snapshots.
   *
   * @param hdfsPath HDFS directory for this job's checkpoints
   * @return a fully configured [[StreamExecutionEnvironment]]
   */
  def createSyncStreamEnv(hdfsPath: String): StreamExecutionEnvironment = {
    logger.info("checkpoint path: {}", hdfsPath)
    val env: StreamExecutionEnvironment = getEnv
    env.enableCheckpointing(60000) // 1 min
    env.getCheckpointConfig.setMinPauseBetweenCheckpoints(30000) // 30 s
    env.getCheckpointConfig.setCheckpointTimeout(1000 * 60 * 2) // 2 min
    env.getCheckpointConfig.setTolerableCheckpointFailureNumber(3) // tolerate 3 failures
    env.setStateBackend(new FsStateBackend(hdfsPath, true)) // true = async snapshots
    env
  }
}
