package util


import bean.Case.{CatergoryLog, CatergoryLogWide, Messages}
import bean.sink.Sink1
import com.alibaba.fastjson.JSON
import org.apache.flink.api.common.restartstrategy.RestartStrategies
import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.api.common.time.Time
import org.apache.flink.runtime.state.filesystem.FsStateBackend
import org.apache.flink.streaming.api.CheckpointingMode
import org.apache.flink.streaming.api.environment.CheckpointConfig
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer
import org.apache.flink.table.api.Expressions.$
import org.apache.flink.table.api.bridge.scala.StreamTableEnvironment
import task.DataToWideTask

import java.util.Properties
import java.util.concurrent.TimeUnit

/**
 *
 * @program: util
 * @author: YCLW058
 * @create: 2021-05-26 17:25
 * @description:
 * Entry-point object for the scala-flink streaming job.
 *
 * */

object APP {

  /**
   * Job entry point: builds the Flink streaming environment, configures
   * checkpointing and restart behavior, consumes JSON records from the
   * Kafka topic "input_kafka", widens them into [[CatergoryLogWide]]
   * records via [[DataToWideTask.process]], and writes the result to
   * stdout and [[Sink1]].
   */
  def main(args: Array[String]): Unit = {
    // 1. Environment setup
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)
    // NOTE(review): tableEnv (and the `$` import) are currently unused —
    // the "register as table" step below was never filled in.
    val tableEnv = StreamTableEnvironment.create(env)

    // Checkpoint configuration
    // 1.1 Required settings
    // Trigger a checkpoint every 1000 ms.
    env.enableCheckpointing(1000)
    // State backend / checkpoint storage location.
    // FIX: original path was "file:///D:/D:/Data/ckp" — the drive letter
    // segment was duplicated, producing an invalid directory.
    env.setStateBackend(new FsStateBackend("file:///D:/Data/ckp"))

    // 1.2 Recommended settings
    // Minimum pause between two consecutive checkpoints.
    env.getCheckpointConfig.setMinPauseBetweenCheckpoints(500)
    // Allow up to 10 checkpoint failures before failing the whole job.
    env.getCheckpointConfig.setTolerableCheckpointFailureNumber(10)
    // Retain externalized checkpoints when the job is cancelled
    // (default behavior would delete them).
    env.getCheckpointConfig.enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION)

    // 1.3 Defaults worth knowing
    // Use exactly-once checkpointing semantics.
    env.getCheckpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE)
    // Abort a checkpoint if it does not complete within 60 seconds.
    // (Original comment claimed "10 minutes", but 60000 ms = 60 s.)
    env.getCheckpointConfig.setCheckpointTimeout(60000)
    // At most one checkpoint in flight at a time.
    env.getCheckpointConfig.setMaxConcurrentCheckpoints(1)

    // 1.4 Restart strategy: fixed-delay restart.
    env.setRestartStrategy(
      RestartStrategies.fixedDelayRestart(
        3, // restart at most 3 times
        Time.of(5, TimeUnit.SECONDS) // wait 5 s between attempts
      )
    )

    // 2. Source: Kafka consumer configuration
    val props = new Properties
    // Kafka broker address
    props.setProperty("bootstrap.servers", "localhost:9092")
    // Consumer group id
    props.setProperty("group.id", "output_kafka")
    // Offset reset policy:
    //   latest   — resume from committed offset, else start from the newest message
    //   earliest — resume from committed offset, else start from the oldest message
    props.setProperty("auto.offset.reset", "latest")
    // Background thread checks Kafka partitions every 5 s
    // (dynamic partition discovery).
    props.setProperty("flink.partition-discovery.interval-millis", "5000")

    // Create the FlinkKafkaConsumer with the properties above.
    val kafkaSource = new FlinkKafkaConsumer[String]("input_kafka", new SimpleStringSchema, props)
    // Commit offsets to Kafka as part of each checkpoint.
    kafkaSource.setCommitOffsetsOnCheckpoints(true)

    val kafkaDS = env.addSource(kafkaSource)

    // 3. Transformation: parse each JSON string into a Messages case class.
    val messagesDS = kafkaDS.map(jsonStr => {
      // Parse the envelope: { "count": ..., "timeStamp": ..., "message": "<nested json>" }
      val jsonObj = JSON.parseObject(jsonStr)
      val count = jsonObj.getLong("count")
      val timeStamp = jsonObj.getLong("timeStamp")
      val messageJsonStr = jsonObj.getString("message")
      // The "message" field is itself a JSON string holding a CatergoryLog.
      val catergoryLog = JSON.parseObject(messageJsonStr, classOf[CatergoryLog])
      Messages(catergoryLog, count, timeStamp)
    })
    // Sample: Messages(CatergoryLog(17,17,5,39423.87,china,电信,实体店,安踏,1577890860000,1577890860000),1,1622096794593)

    // Widen Messages into CatergoryLogWide records.
    val catergoryLogWideDS: DataStream[CatergoryLogWide] = DataToWideTask.process(messagesDS)
    // Sample: CatergoryLogWide(8,11,14,78692.52,china,联通,百度链接,得物,1577890860000,1577890860000,1,1622101830004)

    // Register as a table — TODO: not implemented yet (tableEnv unused above).

    // 4. Sink
    catergoryLogWideDS.print()
    catergoryLogWideDS.addSink(new Sink1)

    // 5. Execute the job graph.
    env.execute()
  }

}
