package com.baishancloud.log.douyin.demand1.subtask

import com.baishancloud.log.douyin._
import org.apache.flink.api.common.state.{ListState, ListStateDescriptor}
import org.apache.flink.api.common.typeinfo.{TypeHint, TypeInformation}
import org.apache.flink.api.java.utils.ParameterTool
import org.apache.flink.configuration.Configuration
import org.apache.flink.runtime.state.{FunctionInitializationContext, FunctionSnapshotContext}
import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction
import org.apache.flink.streaming.api.functions.ProcessFunction
import org.apache.flink.util.Collector

import scala.collection.mutable

/**
 * Because Toutiao domain data was migrated from the Jinhua region to the Shijiazhuang region
 * Kafka cluster, Douyin requests must be written in both places. On top of the original job,
 * a `kafka_source` field is added to the tags to distinguish which region's Kafka cluster the
 * data was consumed from.<br>
 * This operator performs local (pre-shuffle) aggregation for the sub-task; it is the original
 * job's local aggregation with `kafka_source` added to the tags.
 * @author ziqiang.wang
 * @date 2022/3/2 10:18
 */
class LocalAgg1Sub(parameterTool: ParameterTool) extends ProcessFunction[Result1Sub, Result1Sub] with CheckpointedFunction {

  /**
   * Operator state holding the intermediate results captured at checkpoint time.
   */
  var listState: ListState[(ResultKey1Sub, Result1Sub)] = _
  /**
   * In-memory table of partially aggregated results, keyed by each record's aggregation key.
   */
  val middleResult: mutable.Map[ResultKey1Sub, Result1Sub] = mutable.Map[ResultKey1Sub, Result1Sub]()
  /**
   * Flush threshold: once this many records have been processed, all intermediate results
   * are emitted downstream and the table is cleared. Default: 1000.
   */
  var outThreadNum: Long = 1000
  /**
   * Number of records processed so far by this operator instance.
   */
  var count: Long = 0

  override def open(parameters: Configuration): Unit = {
    // Use the two-arg overload so the documented default (1000) actually applies:
    // ParameterTool.getLong(key) throws when the key is missing.
    outThreadNum = parameterTool.getLong(localAggNum, outThreadNum)
    if (outThreadNum <= 0) {
      // Guard against a mis-configured threshold, which would make the
      // `count % outThreadNum` check below throw ArithmeticException (0)
      // or never flush (negative).
      outThreadNum = 1000
    }
  }

  override def processElement(value: Result1Sub, ctx: ProcessFunction[Result1Sub, Result1Sub]#Context, out: Collector[Result1Sub]): Unit = {
    val key: ResultKey1Sub = value.getKey
    if (middleResult.contains(key)) {
      // Merge with the existing partial result: sum the metric value and the
      // unusual_value field; all other attributes are taken from the first record.
      val lastResult: Result1Sub = middleResult(key)
      middleResult.put(key, Result1Sub(
        lastResult.time,
        lastResult.step,
        lastResult.name,
        lastResult.endpoint,
        lastResult.value + value.value,
        lastResult.tags,
        Fields1Sub(lastResult.fields.unusual_value + value.fields.unusual_value)
      ))
    } else {
      middleResult.put(key, value)
    }
    count += 1
    if (count % outThreadNum == 0) {
      // Threshold reached: emit every intermediate result and clear the table.
      middleResult.foreach(entry => out.collect(entry._2))
      middleResult.clear()
    }
  }


  /**
   * Invoked when a snapshot is taken; writes the intermediate results into [[listState]].
   */
  override def snapshotState(context: FunctionSnapshotContext): Unit = {
    listState.clear()
    // `entry` already is the (key, result) tuple the state expects; passing the
    // components separately relied on deprecated Scala auto-tupling.
    middleResult.foreach(entry => listState.add(entry))
  }

  /**
   * Invoked on initialization and on restore from a snapshot.<br>
   * Creates [[listState]] and, when restoring, reloads the snapshot data into the
   * intermediate result table.
   */
  override def initializeState(context: FunctionInitializationContext): Unit = {
    // NOTE: the state name is kept as-is; renaming it would break savepoint compatibility.
    val descriptor: ListStateDescriptor[(ResultKey1Sub, Result1Sub)] = new ListStateDescriptor[(ResultKey1Sub, Result1Sub)]("中间计算结果", TypeInformation.of(new TypeHint[(ResultKey1Sub, Result1Sub)] {}))
    listState = context.getOperatorStateStore.getListState(descriptor)
    if (context.isRestored) {
      middleResult.clear()
      listState.get().forEach(entry => middleResult.put(entry._1, entry._2))
    }
  }
}
