package com.wzk.dwetl.utils

import org.apache.spark.sql.Row
import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types.{DataType, DataTypes, StructField, StructType}
import org.roaringbitmap.RoaringBitmap

/**
 * Spark SQL UDAF that merges serialized RoaringBitmaps with bitwise OR.
 *
 * Input rows carry a single BinaryType column containing a RoaringBitmap
 * serialized via `RrUtils.ser`; the result is the serialized union of all
 * input bitmaps in the group.
 *
 * NOTE(review): `UserDefinedAggregateFunction` is deprecated in Spark 3.x in
 * favor of `org.apache.spark.sql.expressions.Aggregator` — consider migrating
 * if the project is on Spark 3; confirm the Spark version in use.
 */
object RRAggregationOrAggregationFunction0 extends UserDefinedAggregateFunction {
  // Input: one BinaryType column holding a serialized RoaringBitmap.
  override def inputSchema: StructType = new StructType(Array(new StructField("bitmap", DataTypes.BinaryType)))

  // Intermediate buffer: one BinaryType slot holding the partial union, serialized.
  override def bufferSchema: StructType = new StructType(Array(new StructField("buff", DataTypes.BinaryType)))

  // Result: the serialized merged bitmap as a byte array.
  override def dataType: DataType = DataTypes.BinaryType

  // Bitwise OR is order-insensitive, so the same inputs always produce the same result.
  override def deterministic: Boolean = true

  // Seed the buffer with an empty bitmap so the first update has something to OR into.
  override def initialize(buffer: MutableAggregationBuffer): Unit =
    buffer.update(0, RrUtils.ser(new RoaringBitmap()))

  /**
   * Fold one input row into the buffer: deserialize both bitmaps, OR them,
   * and write the union back.
   *
   * Fix: Spark does not filter null column values before calling a UDAF, so a
   * null byte array previously reached `RrUtils.de` and crashed the job.
   * Null inputs are now skipped, leaving the buffer unchanged.
   */
  override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
    val inputRrBytes = input.getAs[Array[Byte]](0)
    if (inputRrBytes != null) {
      val bufferRr = RrUtils.de(buffer.getAs[Array[Byte]](0))
      bufferRr.or(RrUtils.de(inputRrBytes))
      buffer.update(0, RrUtils.ser(bufferRr))
    }
  }

  // Merge partial buffers from different partitions; a partial buffer has the
  // same single-binary-column shape as an input row, so `update` applies directly.
  override def merge(buffer: MutableAggregationBuffer, input: Row): Unit =
    update(buffer, input)

  // Final result: the buffer already holds the serialized union — return it as-is.
  override def evaluate(buffer: Row): Any = buffer.getAs[Array[Byte]](0)
}
