package com.yanggu.flink.datastream_api.accumulate_batch

import cn.hutool.core.collection.CollUtil
import org.apache.flink.api.common.operators.ProcessingTimeService
import org.apache.flink.api.common.state.{ListState, ListStateDescriptor}
import org.apache.flink.api.scala._
import org.apache.flink.runtime.state.{StateInitializationContext, StateSnapshotContext}
import org.apache.flink.streaming.api.operators.{AbstractStreamOperator, ChainingStrategy, OneInputStreamOperator}
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.streaming.runtime.streamrecord.StreamRecord

import java.util
import java.util.concurrent.ScheduledFuture

object TestMiniBatch {

  /**
   * Entry point: reads text lines from a local socket, runs them through the
   * mini-batch operator and prints every emitted batch.
   */
  def main(args: Array[String]): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)
    env.disableOperatorChaining()

    val source = env.socketTextStream("localhost", 7777)
    val batched = source.transform("MiniBatchOperator", new ProcessTimeMiniBatchOperator())
    batched.print("test")

    env.execute()
  }

}

/**
 * Implements semantics similar to Flink SQL's MiniBatch.
 * <p>Emits the accumulated batch downstream (as a List) once either the batch
 * size is reached or the wait interval elapses.
 * <p>The wait interval uses processing time; a processing-time timer can be
 * used here even without a keyBy.
 * <p>See ProcTimeMiniBatchAssignerOperator for related code.
 *
 * @param intervalMs wait interval in milliseconds
 * @param batchSize  batch size
 */
class ProcessTimeMiniBatchOperator(intervalMs: Long = 10000L, batchSize: Int = 5)
  extends AbstractStreamOperator[util.List[String]] with OneInputStreamOperator[String, util.List[String]]
    with ProcessingTimeService.ProcessingTimeCallback {

  /**
   * Local buffer holding elements until the batch is flushed downstream.
   */
  private val buffer = new util.ArrayList[String](batchSize + 1)

  /**
   * Operator (non-keyed) state used to persist the buffer across checkpoints.
   */
  private var listState: ListState[String] = _

  /**
   * Handle of the currently registered processing-time timer; null when no
   * timer is pending. Doubles as the "timer already registered" flag.
   */
  private var scheduledFuture: ScheduledFuture[_] = _

  this.chainingStrategy = ChainingStrategy.ALWAYS

  /**
   * Buffers the incoming element. Flushes immediately once the batch size is
   * reached; otherwise ensures a processing-time timer is registered so the
   * partial batch is emitted after at most `intervalMs` milliseconds.
   */
  override def processElement(element: StreamRecord[String]): Unit = {
    buffer.add(element.getValue)
    if (buffer.size >= batchSize) {
      flush()
    } else if (scheduledFuture == null) {
      //No timer pending yet: register one so a partial batch cannot get stuck forever
      val triggerTimestamp = processingTimeService.getCurrentProcessingTime + intervalMs
      this.scheduledFuture = processingTimeService.registerTimer(triggerTimestamp, this)
    }
  }

  /**
   * Processing-time timer callback: emits whatever has accumulated so far.
   */
  override def onProcessingTime(time: Long): Unit = {
    if (CollUtil.isNotEmpty(buffer)) {
      flush()
    }
  }

  /**
   * Checkpoint hook: replaces the operator state with the current buffer
   * contents so an in-flight partial batch survives a failure.
   */
  override def snapshotState(context: StateSnapshotContext): Unit = {
    //Drop the previous snapshot before writing the current buffer
    listState.clear()
    if (CollUtil.isNotEmpty(buffer)) {
      listState.addAll(buffer)
    }
  }

  /**
   * State initialization hook. On restore, the checkpointed elements are copied
   * back into the local buffer and flushed right away: the previous timer is
   * gone after a restart, so without an immediate flush a partial batch could
   * linger indefinitely if no further input arrives.
   */
  override def initializeState(context: StateInitializationContext): Unit = {
    val listStateDescriptor = new ListStateDescriptor[String]("list-state", classOf[String])
    listState = context.getOperatorStateStore.getListState(listStateDescriptor)
    if (context.isRestored) {
      listState.get().forEach(temp => buffer.add(temp))
      //NOTE(review): emitting records during initializeState (before open) is
      //unusual for Flink operators - confirm the runtime accepts output here
      flush()
    }
  }

  /**
   * Emits the buffered elements downstream as a single list, then clears the
   * buffer and cancels any pending processing-time timer.
   * <p>Bug fix: a defensive copy of the buffer is emitted. The previous code
   * sent the live `buffer` reference and then cleared it, so a chained
   * downstream operator (ChainingStrategy.ALWAYS) that retained the list would
   * observe an empty or later-mutated collection.</p>
   */
  private def flush(): Unit = {
    if (CollUtil.isNotEmpty(buffer)) {
      //Copy before emitting: downstream must never share the mutable buffer
      val batch = new util.ArrayList[String](buffer)
      output.collect(new StreamRecord[util.List[String]](batch))
      buffer.clear()
      //Cancel the timer if one was registered, so the next batch starts fresh
      if (scheduledFuture != null) {
        scheduledFuture.cancel(true)
        scheduledFuture = null
      }
    }
  }

}
