package com.sweetdream.utils

import java.lang
import java.util.Properties
import java.util.concurrent.TimeUnit

import org.apache.flink.api.common.restartstrategy.RestartStrategies
import org.apache.flink.api.common.serialization.AbstractDeserializationSchema
import org.apache.flink.api.common.time.Time
import org.apache.flink.api.java.utils.ParameterTool
import org.apache.flink.runtime.state.filesystem.FsStateBackend
import org.apache.flink.streaming.api.environment.CheckpointConfig.ExternalizedCheckpointCleanup
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.streaming.api.{CheckpointingMode, TimeCharacteristic}
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer.Semantic
import org.apache.flink.streaming.connectors.kafka.{FlinkKafkaConsumer, FlinkKafkaProducer, KafkaSerializationSchema}
import org.apache.kafka.clients.producer.{ProducerConfig, ProducerRecord}

/**
 * Title: FlinkUtils
 * Description: Builds a checkpointed Flink StreamExecutionEnvironment plus
 * Kafka byte-array source/sink from a job properties file (path in args(0)).
 * Date 2021/3/3
 */
class FlinkUtils(args: Array[String], sMainName: String) extends Serializable {

  // args(0) is expected to be the path of the job's .properties file.
  val propertiesFilePath = args(0)
  val parameterTool = ParameterTool.fromPropertiesFile(propertiesFilePath)

  // Kafka client properties shared by source and sink; role-specific settings
  // are added inside getDataSource()/getDataSink() before each is built.
  val properties = new Properties()
  properties.setProperty("bootstrap.servers", parameterTool.get("KAFKA_BROKER"))

  /**
   * Creates a StreamExecutionEnvironment configured from the properties file:
   * at-least-once checkpointing every minute, externalized checkpoints retained
   * on cancellation, event-time semantics, a fixed-delay restart strategy and a
   * filesystem state backend.
   *
   * @return the configured environment
   */
  def getStreamExecutionEnvironment(): StreamExecutionEnvironment = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    // Make the job parameters visible in the web UI / to rich functions.
    env.getConfig.setGlobalJobParameters(parameterTool)

    env.getCheckpointConfig.setCheckpointingMode(CheckpointingMode.AT_LEAST_ONCE)
    env.enableCheckpointing(60 * 1000) // checkpoint once per minute
    // Keep externalized checkpoints after manual cancellation so the job can be restored.
    env.getCheckpointConfig.enableExternalizedCheckpoints(ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION)
    env.getCheckpointConfig.setCheckpointTimeout(10 * 60 * 1000)
    env.getCheckpointConfig.setMaxConcurrentCheckpoints(6)
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
    // Restart strategy: retry every 5 seconds; after 5 failed attempts, give up.
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(5, Time.of(5, TimeUnit.SECONDS)))
    // Tolerate any number of checkpoint failures so the job keeps running and producing output.
    env.getCheckpointConfig.setTolerableCheckpointFailureNumber(Integer.MAX_VALUE)

    println(parameterTool.get("FsStateBackend_" + sMainName))
    if (OSUtils.isWinOs) {
      // Local development on Windows: checkpoint to the local filesystem.
      env.setStateBackend(new FsStateBackend("file:///d:/flinkcheckpoint"))
      println("running on the test environment!")
    } else {
      env.setStateBackend(new FsStateBackend(parameterTool.get("FsStateBackend_" + sMainName)))
    }

    env.getConfig.enableForceKryo()

    env
  }

  /**
   * Creates a Kafka consumer for the configured TOPIC that hands the raw
   * message bytes (e.g. protobuf payloads) downstream without decoding.
   *
   * @return the consumer source
   */
  def getDataSource(): FlinkKafkaConsumer[Array[Byte]] = {
    val topic = parameterTool.get("TOPIC")

    properties.setProperty("group.id", parameterTool.get("TRANSACTION_GROUP_" + sMainName))
    properties.setProperty("auto.offset.reset", parameterTool.get("OFFSET_RESET_" + sMainName))

    val flinkConsumer = new FlinkKafkaConsumer[Array[Byte]](topic, new ByteArrayDeserializationSchema[Array[Byte]](), properties)

    flinkConsumer
  }

  /**
   * Pass-through deserialization schema: delivers the raw Kafka message bytes
   * (expected to be protobuf-encoded upstream).
   *
   * Note: the type parameter T is unused; it is kept for source compatibility
   * with existing callers.
   */
  class ByteArrayDeserializationSchema[T] extends AbstractDeserializationSchema[Array[Byte]] {
    override def deserialize(message: Array[Byte]): Array[Byte] = message
  }

  /**
   * Creates a Kafka producer sink writing to SEND_TOPIC with at-least-once
   * delivery semantics.
   *
   * @return the producer sink
   */
  def getDataSink(): FlinkKafkaProducer[Array[Byte]] = {
    // Producer-side setting: 3-minute transaction timeout. Previously this was
    // set in getDataSource(), so the producer only picked it up if the consumer
    // happened to be built first — apply it here, where it belongs.
    properties.setProperty(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, (1000 * 60 * 3).toString)

    val kafkaSink = new FlinkKafkaProducer[Array[Byte]](
      parameterTool.get("SEND_TOPIC"),
      new UserDefinedKafkaSerializationSchema, properties,
      Semantic.AT_LEAST_ONCE)

    kafkaSink
  }

  /**
   * Serialization schema for the sink: writes the element bytes as both the
   * record key and the record value to SEND_TOPIC.
   */
  class UserDefinedKafkaSerializationSchema extends KafkaSerializationSchema[Array[Byte]] {
    override def serialize(element: Array[Byte], aLong: lang.Long): ProducerRecord[Array[Byte], Array[Byte]] = {
      new ProducerRecord[Array[Byte], Array[Byte]](parameterTool.get("SEND_TOPIC"),
        element, element)
    }
  }

}
