package com.yanggu.flink.datastream_api.accumulate_batch

import cn.hutool.core.collection.CollUtil
import org.apache.flink.api.common.functions.AggregateFunction
import org.apache.flink.api.common.operators.ProcessingTimeService
import org.apache.flink.api.common.state.{ListState, ListStateDescriptor}
import org.apache.flink.api.common.typeinfo.{TypeHint, TypeInformation}
import org.apache.flink.api.java.functions.KeySelector
import org.apache.flink.api.scala.typeutils.Types
import org.apache.flink.streaming.api.scala._
import org.apache.flink.runtime.state.{StateInitializationContext, StateSnapshotContext}
import org.apache.flink.streaming.api.operators.{AbstractStreamOperator, OneInputStreamOperator}
import org.apache.flink.streaming.api.scala.{StreamExecutionEnvironment, _}
import org.apache.flink.streaming.runtime.streamrecord.StreamRecord

import java.util
import java.util.concurrent.ScheduledFuture
import java.util.concurrent.atomic.AtomicInteger
import scala.language.postfixOps


object TestLocalKeyByAccumulateBatch {

  /**
   * Word-count job demonstrating local pre-aggregation: words from a socket are
   * partially aggregated per key by [[LocalKeyByAccumulateBatchOperator]] before
   * the global keyBy/reduce, reducing the volume shuffled across the network.
   */
  def main(args: Array[String]): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)

    // The word itself is the key for the local pre-aggregation operator.
    val localKeySelector: KeySelector[String, String] = (word: String) => word
    val accTypeInfo = Types.TUPLE[(String, Long)]
    val sumFunction = new SumAggregateFunction()
    val localAggOperator =
      new LocalKeyByAccumulateBatchOperator[String, String, Long, Long](localKeySelector, accTypeInfo, sumFunction)

    // Key of the partially aggregated (word, partialCount) tuples emitted upstream.
    val globalKeySelector: KeySelector[(String, Long), String] = (pair: (String, Long)) => pair._1

    val words = env
      .socketTextStream("localhost", 6666)
      .flatMap(_.split(" "))

    words
      .transform("LocalKeyByAccumulateBatchOperator", localAggOperator)
      .keyBy(globalKeySelector)
      .reduce((left, right) => (left._1, sumFunction.merge(left._2, right._2)))
      .map(pair => (pair._1, sumFunction.getResult(pair._2)))
      .print("print>>")

    env.execute()
  }

}

/**
 * Local pre-aggregation ("local keyBy") operator.
 *
 * Incoming elements are folded into a per-key accumulator kept in a local buffer.
 * The buffer is emitted downstream as (key, accumulator) pairs either when
 * `batchSize` elements have been received or `intervalMs` milliseconds after the
 * first element of the batch — whichever happens first. The buffer is persisted
 * in operator list state so no partial aggregate is lost across checkpoints.
 *
 * @param keySelector      extracts the aggregation key from an input element
 * @param elementTypeInfo  type information for the (key, accumulator) pairs held in state
 * @param aggregateFunction Flink aggregate used to create/add/merge accumulators
 * @param batchSize        maximum number of elements buffered before a forced flush
 * @param intervalMs       maximum time a batch may wait before being flushed
 */
class LocalKeyByAccumulateBatchOperator[KEY, IN, ACC, OUT](keySelector: KeySelector[IN, KEY],
                                                           elementTypeInfo: TypeInformation[(KEY, ACC)],
                                                           aggregateFunction: AggregateFunction[IN, ACC, OUT],
                                                           batchSize: Int = 100,
                                                           intervalMs: Long = 200L)
    extends AbstractStreamOperator[(KEY, ACC)] with OneInputStreamOperator[IN, (KEY, ACC)] with ProcessingTimeService.ProcessingTimeCallback with Serializable {

  // Number of elements received in the current batch; reset on every flush.
  // @transient added for consistency with the other runtime-initialized fields:
  // all of them are (re)built in initializeState and must not be serialized.
  @transient
  private var currentSize: AtomicInteger = _

  // Per-key local buffer of partial accumulators.
  @transient
  private var localMap: util.Map[KEY, ACC] = _

  // Handle of the currently pending processing-time timer; null when none is registered.
  @transient
  private var scheduledFuture: ScheduledFuture[_] = _

  // Operator list state used to persist the local buffer across checkpoints.
  @transient
  private var listState: ListState[(KEY, ACC)] = _

  /**
   * Initializes the buffer, counter and operator state; on restore, merges the
   * checkpointed (key, accumulator) pairs back into the buffer and flushes them.
   */
  @throws[Exception]
  override def initializeState(context: StateInitializationContext): Unit = {
    localMap = new util.HashMap[KEY, ACC]
    currentSize = new AtomicInteger(0)
    val descriptor = new ListStateDescriptor[(KEY, ACC)]("list-state", elementTypeInfo)
    listState = context.getOperatorStateStore.getListState(descriptor)
    if (context.isRestored) {
      // Merge restored entries into the local buffer (merge, not put, in case
      // redistribution hands this subtask several entries for the same key).
      listState.get().forEach { pair =>
        val acc: ACC = localMap.getOrDefault(pair._1, aggregateFunction.createAccumulator)
        localMap.put(pair._1, aggregateFunction.merge(acc, pair._2))
      }
      // No timer is registered after a restore; if no new element ever arrives,
      // the restored data would sit in the buffer forever. Force it downstream now.
      flush()
    }
  }

  /** Snapshots the current buffer into operator list state. */
  @throws[Exception]
  override def snapshotState(context: StateSnapshotContext): Unit = {
    // Replace the previous checkpoint's content with the current buffer.
    listState.clear()
    localMap.forEach((key: KEY, acc: ACC) => listState.add((key, acc)))
  }

  /**
   * Folds the element into its key's accumulator. Flushes when the batch is
   * full; otherwise ensures a flush timer is pending for this batch.
   */
  @throws[Exception]
  override def processElement(element: StreamRecord[IN]): Unit = {
    val in: IN = element.getValue
    val key: KEY = keySelector.getKey(in)
    val acc: ACC = localMap.getOrDefault(key, aggregateFunction.createAccumulator)
    localMap.put(key, aggregateFunction.add(in, acc))
    if (currentSize.incrementAndGet >= batchSize) {
      // Batch is full: emit immediately.
      flush()
    } else if (scheduledFuture == null) {
      // First element of a new batch: schedule a flush intervalMs from now so a
      // slow trickle of data still gets emitted with bounded latency.
      val deadline: Long = processingTimeService.getCurrentProcessingTime + intervalMs
      scheduledFuture = processingTimeService.registerTimer(deadline, this)
    }
  }

  /**
   * Timer callback: the batch interval has elapsed, emit the current buffer.
   *
   * BUG FIX: the fired timer's handle is cleared unconditionally. Previously it
   * was only cleared inside flush(), so a callback that found the buffer empty
   * left a stale, already-fired future behind — processElement then never
   * registered a new timer and buffered data waited until batchSize was reached.
   */
  override def onProcessingTime(time: Long): Unit = {
    scheduledFuture = null
    flush()
  }

  /**
   * Emits every buffered (key, accumulator) pair downstream, then clears the
   * buffer and counter and cancels any pending timer. No-op on an empty buffer.
   */
  private def flush(): Unit = {
    if (!localMap.isEmpty) {
      localMap.forEach((key: KEY, acc: ACC) => {
        output.collect(new StreamRecord[(KEY, ACC)]((key, acc)))
      })
      localMap.clear()
      currentSize.set(0)
      // Cancel the pending timer, if any, so it does not fire for the batch we
      // just emitted. (Already null when invoked from onProcessingTime.)
      if (scheduledFuture != null) {
        scheduledFuture.cancel(true)
        scheduledFuture = null
      }
    }
  }

}

/**
 * Word-count style aggregate: every input string contributes one to a running
 * count, and merging two partial counts sums them.
 */
class SumAggregateFunction extends AggregateFunction[String, Long, Long] {

  /** A fresh partial count starts at zero. */
  override def createAccumulator: Long = 0L

  /** Each element counts as one, regardless of its content. */
  override def add(value: String, accumulator: Long): Long = accumulator + 1L

  /** The final result is the count itself. */
  override def getResult(accumulator: Long): Long = accumulator

  /** Combines two partial counts into one. */
  override def merge(a: Long, b: Long): Long = a + b

}
