package com.cw.realtime.common
package base

import constant.Constant
import util.{FlinkSourceUtil, SqlUtil}

import org.apache.flink.api.common.JobExecutionResult
import org.apache.flink.api.common.eventtime.WatermarkStrategy
import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.api.scala.createTypeInformation
import org.apache.flink.configuration.Configuration
import org.apache.flink.connector.kafka.source.KafkaSource
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend
import org.apache.flink.streaming.api.CheckpointingMode
import org.apache.flink.streaming.api.environment.CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION
import org.apache.flink.streaming.api.environment.{StreamExecutionEnvironment => JavaEnv}
import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}
import org.apache.flink.table.api.bridge.scala.StreamTableEnvironment

object FlinkRunner {

  /** Configuration for a DataStream job: Kafka consumer group, source topic,
   *  Web UI rest port, job parallelism, and whether checkpointing is enabled. */
  case class RunnerConfig(groupId: String, topic: String, port: Int, parallelism: Int = 4, ckp: Boolean = false)

  /** Configuration for a Table/SQL job (no source topic — source tables are created via DDL). */
  case class SqlRunnerConfig(groupId: String, port: Int, parallelism: Int = 4, ckp: Boolean = false)


  /**
   * Reads a stream from Kafka and processes it with the supplied callback.
   *
   * @param handler callback receiving the execution environment and the raw Kafka stream
   * @param config  job configuration (topic, consumer group, Web UI port, etc.)
   * @return the [[JobExecutionResult]] from `env.execute()`
   */
  def run(handler: (StreamExecutionEnvironment, DataStream[String]) => Unit)
         (implicit config: RunnerConfig): JobExecutionResult = {

    // Required so checkpoint data written to HDFS is owned by this user.
    // NOTE(review): hard-coded user name — consider making it configurable.
    System.setProperty("HADOOP_USER_NAME", "leng")

    val webUIConfig = getWebUIConfig(config.port)

    val env = new StreamExecutionEnvironment(JavaEnv.getExecutionEnvironment(webUIConfig))

    env.setParallelism(config.parallelism)

    env.setStateBackend(new HashMapStateBackend)

    setCheckpoint(env, config.ckp, config.groupId)

    val src = env.fromSource(
      FlinkSourceUtil.getKafkaSource(config.topic, config.groupId),
      WatermarkStrategy.noWatermarks(),
      "kafka_source"
    )

    handler(env, src)
    env.execute()
  }


  /**
   * Sets up a stream + table environment pair and hands both to the callback.
   *
   * No `env.execute()` is issued here: Table/SQL jobs are submitted by the
   * `executeSql` / `executeInsert` calls made inside the handler itself.
   *
   * @param handler callback receiving the stream and table environments
   * @param config  job configuration (consumer group, Web UI port, etc.)
   */
  def runSql(handler: (StreamExecutionEnvironment, StreamTableEnvironment) => Unit)
            (implicit config: SqlRunnerConfig): Unit = {

    val webUIConfig = getWebUIConfig(config.port)

    val env = new StreamExecutionEnvironment(JavaEnv.getExecutionEnvironment(webUIConfig))
    val tableEnv = StreamTableEnvironment.create(env)

    env.setParallelism(config.parallelism)
    env.setStateBackend(new HashMapStateBackend)

    setCheckpoint(env, config.ckp, config.groupId)

    handler(env, tableEnv)
  }


  /** Builds a configuration that exposes the local Flink Web UI on the given rest port. */
  private def getWebUIConfig(port: Int): Configuration = {
    val webUIConfig = new Configuration
    webUIConfig.setInteger("rest.port", port)
    webUIConfig
  }


  /**
   * Enables exactly-once checkpointing to HDFS when `ckp` is true; no-op otherwise.
   * Checkpoints are stored under a per-job directory keyed by the consumer group id.
   */
  private def setCheckpoint(env: StreamExecutionEnvironment, ckp: Boolean, groupId: String): Unit = {
    if (ckp) {

      // Checkpoint every 5 seconds.
      env.enableCheckpointing(5000L)

      val checkpointConfig = env.getCheckpointConfig

      checkpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE)

      // One checkpoint directory per job, keyed by consumer group.
      checkpointConfig.setCheckpointStorage("hdfs://hadoop01:8020/gmall/stream/" + groupId)

      checkpointConfig.setMaxConcurrentCheckpoints(1)

      checkpointConfig.setMinPauseBetweenCheckpoints(5000)

      checkpointConfig.setCheckpointTimeout(10000)

      // Keep checkpoint data on job cancellation so state can be restored manually.
      checkpointConfig.setExternalizedCheckpointCleanup(RETAIN_ON_CANCELLATION)

    }
  }

  /** Registers the `topic_db` Kafka source table via DDL; prints the DDL result for visibility. */
  def createTopicDbTable(tableEnv: StreamTableEnvironment)(implicit config: SqlRunnerConfig): Unit = {
    tableEnv.executeSql(SqlUtil.getKafkaTopicDb(config.groupId)).print()
  }


  /** Registers the HBase-backed `base_dic` dimension table for lookup joins. */
  def createBaseDic(tableEnv: StreamTableEnvironment): Unit = {
    tableEnv.executeSql(
      s"""
CREATE TABLE base_dic (
 rowkey string,
 info ROW<dic_name string>,
 PRIMARY KEY (rowkey) NOT ENFORCED
) WITH (
 'connector' = 'hbase-2.2',
 'table-name' = 'gmall:dim_base_dic',
 'zookeeper.quorum' = '${Constant.HBASE_ZOOKEEPER_QUORUM}'
);
      """).print()
  }


}
