package com.yanggu.flink.datastream_api.state.operatorstate

import cn.hutool.core.date.DateUtil
import com.yanggu.flink.datastream_api.pojo.SensorReading
import com.yanggu.flink.datastream_api.source.MySensorSource
import org.apache.flink.api.common.state.{ListState, ListStateDescriptor}
import org.apache.flink.configuration.Configuration
import org.apache.flink.runtime.state.{FunctionInitializationContext, FunctionSnapshotContext}
import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction
import org.apache.flink.streaming.api.functions.sink.{RichSinkFunction, SinkFunction}
import org.apache.flink.streaming.api.scala._
import org.apache.flink.util.concurrent.ExecutorThreadFactory

import java.util
import java.util.Date
import java.util.concurrent.{Executors, ScheduledFuture, TimeUnit}
import scala.collection.convert.ImplicitConversions.`list asScalaBuffer`

object OperatorStateDemo {

  /** Wires a parallelism-1 sensor source into the batching sink and submits the job. */
  def main(args: Array[String]): Unit = {
    val environment = StreamExecutionEnvironment.getExecutionEnvironment
    environment
      .addSource(new MySensorSource)
      .setParallelism(1)
      .addSink(new MySinkFunction)
      .setParallelism(1)
    environment.execute()
  }

}

/**
 * Batching sink: buffers incoming records locally and flushes them either
 * when the buffer reaches `batchSize` or every `batchIntervalMs` milliseconds
 * via a background timer. The buffer is checkpointed as operator list state
 * so un-flushed records survive failover.
 *
 * Concurrency note: `invoke` and `snapshotState` are serialized by Flink's
 * task thread, but `flush` is also triggered from our own scheduler thread,
 * so all three synchronize on `this`.
 *
 * @param batchSize       max number of buffered records before a size-triggered flush
 * @param batchIntervalMs interval between timer-driven flushes, in milliseconds
 */
class MySinkFunction(val batchSize: Int = 10, val batchIntervalMs: Long = 10000L) extends RichSinkFunction[SensorReading] with CheckpointedFunction {

  // Local buffer of not-yet-flushed records; guarded by this.synchronized.
  private val listBuffer = new util.ArrayList[SensorReading]()

  // Operator state backing the buffer across checkpoints/failover.
  private var listState: ListState[SensorReading] = _

  // Set once close() has run; makes close() idempotent.
  @volatile private var closed = false

  // Handle of the periodic flush task; cancelled in close().
  private var scheduler: ScheduledFuture[_] = _

  // Single-threaded executor that drives the periodic flush.
  private lazy val flushExecutor = Executors.newScheduledThreadPool(1,
    new ExecutorThreadFactory("my-sink-thread"))

  override def open(parameters: Configuration): Unit = {
    // Flush periodically. scheduleWithFixedDelay measures the delay from the
    // END of one run to the start of the next, so a slow flush never overlaps
    // with the following one.
    scheduler = flushExecutor.scheduleWithFixedDelay(() => {
      flush()
    }, batchIntervalMs, batchIntervalMs, TimeUnit.MILLISECONDS)
  }

  // invoke (task thread) and flush (timer thread) run concurrently,
  // so the buffer access must be explicitly locked.
  override def invoke(value: SensorReading, context: SinkFunction.Context): Unit = {
    this.synchronized {
      listBuffer.add(value)
      if (listBuffer.size >= batchSize) {
        flush()
      }
    }
  }

  /** Emit everything currently buffered, then clear the buffer. Thread-safe. */
  def flush(): Unit = {
    this.synchronized {
      for (elem <- listBuffer) {
        println(s"${Thread.currentThread().getName}-$elem")
      }
      listBuffer.clear()
      println(s"${Thread.currentThread().getName}-${DateUtil.formatDateTime(new Date())}输出结束")
    }
  }

  /**
   * Called by Flink when a checkpoint is taken: mirror the local buffer
   * into operator state so un-flushed records can be restored on failover.
   *
   * @param context checkpoint metadata supplied by the runtime
   */
  override def snapshotState(context: FunctionSnapshotContext): Unit = {
    this.synchronized {
      println(s"${Thread.currentThread().getName}在${DateUtil.formatDateTime(new Date())}执行snapshotState方法")
      // Replace the previous snapshot entirely with the current buffer contents.
      listState.clear()
      listBuffer.foreach(listState.add(_))
    }
  }

  /**
   * Called in two situations:
   *  - on a fresh job start (context.isRestored == false)
   *  - on failover/recovery  (context.isRestored == true)
   *
   * Also runs BEFORE open(), so state is ready before the timer starts.
   *
   * @param context gives access to the operator state store and restore flag
   */
  override def initializeState(context: FunctionInitializationContext): Unit = {
    val listStateDescriptor = new ListStateDescriptor[SensorReading]("list-state", classOf[SensorReading])
    listState = context.getOperatorStateStore.getListState(listStateDescriptor)
    // On recovery, copy the checkpointed records back into the local buffer.
    if (context.isRestored) {
      listState.get().forEach(listBuffer.append(_))
    }
  }

  override def close(): Unit = {
    this.synchronized {
      if (!closed) {
        // BUGFIX: was `closed = false`, leaving the flag permanently unset.
        closed = true
        // open() may never have run (e.g. failure during initializeState).
        if (scheduler != null) {
          scheduler.cancel(false)
        }
        // BUGFIX: shut the executor down, otherwise its non-daemon thread leaks.
        flushExecutor.shutdown()
        // BUGFIX: emit whatever is still buffered so records are not lost on shutdown.
        flush()
      }
    }
    super.close()
  }

}
