package util

import org.apache.flink.api.common.restartstrategy.RestartStrategies
import org.apache.flink.api.common.time.Time
import org.apache.flink.runtime.state.filesystem.FsStateBackend
import org.apache.flink.streaming.api.CheckpointingMode
import org.apache.flink.streaming.api.environment.CheckpointConfig
import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api.DataTypes
import org.apache.flink.table.api.bridge.scala.{StreamTableEnvironment, tableConversions}
import org.apache.flink.table.descriptors.{Json, Kafka, Schema}

import java.util.Properties
import java.util.concurrent.TimeUnit

/**
 *
 * @program: util
 * @author: YCLW058
 * @create: 2021-06-01 9:12
 * @description:
 *
 * */

/**
 * Flink streaming job: reads JSON purchase events from the Kafka topic
 * `input_kafka`, registers them as table `cTable`, and prints each row
 * as an append-stream to stdout.
 *
 * Side effects: enables filesystem-backed checkpointing, connects to
 * Kafka/ZooKeeper on localhost, and blocks in `env.execute()`.
 */
object Kafka_app {
  def main(args: Array[String]): Unit = {
    // 1. Environment setup
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)
    val tableEnv = StreamTableEnvironment.create(env)

    // Checkpoint configuration
    // 1.1 Required settings
    // Trigger a checkpoint every 1000 ms.
    env.enableCheckpointing(1000)
    // State backend: store checkpoints on the local filesystem.
    // Fixed: the original path "file:///D:/D:/Data/ckp" had a duplicated
    // drive letter and pointed to a non-existent location on Windows.
    env.setStateBackend(new FsStateBackend("file:///D:/Data/ckp"))
    // 1.2 Recommended settings
    // Minimum pause between two consecutive checkpoints.
    env.getCheckpointConfig.setMinPauseBetweenCheckpoints(500)
    // Tolerate up to 10 checkpoint failures before failing the whole job.
    env.getCheckpointConfig.setTolerableCheckpointFailureNumber(10)
    // Retain the externalized checkpoint when the job is cancelled
    // (default behaviour would delete it).
    env.getCheckpointConfig.enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION)

    // 1.3 Defaults made explicit
    // Exactly-once checkpointing semantics.
    env.getCheckpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE)
    // Abort a checkpoint that has not completed within 60 s (60000 ms).
    // (The original comment claimed "10 minutes", which contradicted the value.)
    env.getCheckpointConfig.setCheckpointTimeout(60000)
    // At most one checkpoint in flight at a time.
    env.getCheckpointConfig.setMaxConcurrentCheckpoints(1)

    // 1.4 Restart strategy: fixed-delay restart,
    // at most 3 attempts, 5 seconds apart.
    env.setRestartStrategy(
      RestartStrategies.fixedDelayRestart(
        3,
        Time.of(5, TimeUnit.SECONDS)
      )
    )

    // 2. Source: Kafka topic `input_kafka` with a JSON schema,
    // registered as temporary table `cTable`.
    tableEnv.connect(new Kafka()
      // "universal" works with Kafka 0.11+ brokers
      .version("universal")
      .topic("input_kafka")
      // ZooKeeper address (required by the descriptor API)
      .property("zookeeper.connect", "localhost:2181")
      // Kafka broker address
      .property("bootstrap.servers", "localhost:9092")
    )
      .withFormat(new Json())
      .withSchema(new Schema()
        .field("categoryID", DataTypes.BIGINT())
        .field("produceID", DataTypes.BIGINT())
        .field("userID", DataTypes.BIGINT())
        .field("price", DataTypes.DOUBLE())
        .field("country", DataTypes.STRING())
        .field("network", DataTypes.STRING())
        .field("source", DataTypes.STRING())
        .field("buyType", DataTypes.STRING())
        .field("buyTime", DataTypes.BIGINT())
        .field("leaveTime", DataTypes.BIGINT())
        .field("count", DataTypes.BIGINT())
        .field("timestamp", DataTypes.BIGINT())
      ).createTemporaryTable("cTable")

    // 3. Transformation: identity projection (kept for illustration;
    // the result is not used further).
    val resultTable = tableEnv.sqlQuery(
      """
        |select *
        |from cTable
        |""".stripMargin)

    // 4. Sink: convert the table to an append stream and print it.
    // resultTable.toAppendStream[(Long,Long,Long,Double,String,String,String,String,Long,Long,Long,Long)].print()
    val table = tableEnv.from("cTable")
    table.toAppendStream[(Long, Long, Long, Double, String, String, String, String, Long, Long, Long, Long)].print()

    // 5. Execute: submits the job graph and blocks until the job ends.
    env.execute()

  }

}
