package com.shujia.flink.core

import java.lang
import java.nio.charset.StandardCharsets
import java.util.Properties

import org.apache.flink.api.common.functions.ReduceFunction
import org.apache.flink.api.common.functions.RuntimeContext
import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.api.common.state.{ReducingState, ReducingStateDescriptor}
import org.apache.flink.configuration.Configuration
import org.apache.flink.kafka.shaded.org.apache.kafka.clients.producer.ProducerRecord
import org.apache.flink.runtime.state.StateBackend
import org.apache.flink.runtime.state.filesystem.FsStateBackend
import org.apache.flink.streaming.api.CheckpointingMode
import org.apache.flink.streaming.api.environment.CheckpointConfig.ExternalizedCheckpointCleanup
import org.apache.flink.streaming.api.functions.KeyedProcessFunction
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer.Semantic
import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema
import org.apache.flink.streaming.connectors.kafka.{FlinkKafkaConsumer, FlinkKafkaProducer}
import org.apache.flink.util.Collector

/**
  * End-to-end exactly-once Flink job: reads words from a Kafka source topic,
  * keeps a checkpointed per-word running count in ReducingState, and writes
  * "word,count" lines back to a Kafka sink topic with a transactional
  * (EXACTLY_ONCE) producer.
  */
object Demo6ReduceState {
  def main(args: Array[String]): Unit = {

    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // Trigger a checkpoint every 20000 ms (every 20 seconds).
    env.enableCheckpointing(20000)

    // Advanced checkpointing options:

    // Use exactly-once mode (this is the default).
    env.getCheckpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE)

    // Require at least 500 ms between the end of one checkpoint and the start of the next.
    env.getCheckpointConfig.setMinPauseBetweenCheckpoints(500)

    // A checkpoint must complete within one minute, otherwise it is discarded.
    env.getCheckpointConfig.setCheckpointTimeout(60000)

    // Allow only one checkpoint to be in progress at any time.
    env.getCheckpointConfig.setMaxConcurrentCheckpoints(1)

    // Keep externalized checkpoints after the job is cancelled, so the job can
    // later be restored from them (see the `flink run -s ...` example below).
    env.getCheckpointConfig.enableExternalizedCheckpoints(ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION)


    // Use HDFS as the state backend; checkpoints are written under this path.
    val stateBackend: StateBackend = new FsStateBackend("hdfs://master:9000/flink/checkpoint")

    env.setStateBackend(stateBackend)


    // Kafka consumer configuration: broker list and consumer group id.
    val properties = new Properties()
    properties.setProperty("bootstrap.servers", "master:9092,node1:9092,node2:9092")
    properties.setProperty("group.id", "asdasdasd")

    // Source: each Kafka record is decoded as one UTF-8 string (one word per record).
    val myConsumer = new FlinkKafkaConsumer[String](
      "checkpoint-topic",
      new SimpleStringSchema(),
      properties)


    val kafkaDS: DataStream[String] = env.addSource(myConsumer)


    // Pair every word with an initial count of 1.
    val kvDS: DataStream[(String, Int)] = kafkaDS.map(word => (word, 1))


    // Partition the stream by the word itself so state is kept per word.
    val keyByDS: KeyedStream[(String, Int), String] = kvDS.keyBy(_._1)


    // Running per-word totals, maintained in checkpointed ReducingState.
    val countDS: DataStream[(String, Int)] = keyByDS.process(new ReduceKeyedProcessFunction)


    // Sink: write the results back to Kafka.

    val properties1 = new Properties
    properties1.setProperty("bootstrap.servers", "master:9092,node1:9092,node2:9092")
    // Producer transaction timeout; must not exceed 15 minutes
    // (NOTE(review): presumably bounded by the broker's transaction.max.timeout.ms — verify broker config).
    properties1.setProperty("transaction.timeout.ms", 5 * 60 * 1000 + "")

    val myProducer = new FlinkKafkaProducer[String](
      "sink-topic",
      new MyKafkaSerializationSchema("sink-topic"),
      properties1,
      Semantic.EXACTLY_ONCE // exactly-once: records are committed transactionally on checkpoint
    )


    // Format each (word, count) pair as "word,count" before producing.
    countDS.map(w => w._1 + "," + w._2).addSink(myProducer)


    env.execute()

    /**
      * Restoring the job from a specific checkpoint:
      * -s hdfs://master:9000/flink/checkpoint/6bdd4748b45af8317380db3ab9d29239/chk-100
      *
      *
      * flink run -c com.shujia.flink.core.Demo6ReduceState -s hdfs://master:9000/flink/checkpoint/f024ab744c8f665fe0bb3bba9c073829/chk-5  flink-1.0.jar
      *
      *
      * Consuming the sink topic (read_committed is required to see only committed transactions):
      * kafka-console-consumer.sh --bootstrap-server  master:9092,node1:9092,node2:9092 --isolation-level read_committed  --from-beginning --topic sink-topic
      *
      */

  }
}


/**
  * Serializes each String record into a value-only (key-less) Kafka
  * ProducerRecord targeted at the given topic.
  *
  * @param topic the Kafka topic every record is written to
  */
class MyKafkaSerializationSchema(topic: String) extends KafkaSerializationSchema[String] {
  override def serialize(element: String, timestamp: lang.Long): ProducerRecord[Array[Byte], Array[Byte]] = {
    // Encode explicitly as UTF-8. The previous `element.getBytes()` used the
    // JVM platform-default charset, so the bytes written to Kafka depended on
    // the environment the TaskManager ran in; UTF-8 matches what
    // SimpleStringSchema expects on the consuming side.
    new ProducerRecord[Array[Byte], Array[Byte]](topic, element.getBytes(StandardCharsets.UTF_8))
  }
}


/**
  * Keyed process function that maintains a per-key running sum of the incoming
  * counts in Flink ReducingState and emits (key, runningTotal) for every
  * element it receives. The state is checkpointed and restored automatically.
  */
class ReduceKeyedProcessFunction extends KeyedProcessFunction[String, (String, Int), (String, Int)] {

  // Per-key running total; initialized in open(), scoped to the current key by Flink.
  var state: ReducingState[Int] = _

  override def open(parameters: Configuration): Unit = {
    // A ReducingState requires a reduce function that folds each newly added
    // value into the previously accumulated one — here, plain addition.
    val descriptor = new ReducingStateDescriptor[Int](
      "reduce",
      new ReduceFunction[Int] {
        override def reduce(value1: Int, value2: Int): Int = value1 + value2
      },
      classOf[Int]
    )

    state = getRuntimeContext.getReducingState(descriptor)
  }

  override def processElement(value: (String, Int), ctx: KeyedProcessFunction[String, (String, Int), (String, Int)]#Context, out: Collector[(String, Int)]): Unit = {
    val (word, increment) = value

    // add() folds the new count into the stored total via the reduce function above.
    state.add(increment)

    // Forward the key together with its updated running total.
    out.collect((word, state.get()))
  }
}
