package com.atguigu.day07

import org.apache.spark.sql.Row
import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types._

/*
    Weak-typed (untyped, Row-based) user-defined aggregate function that
    computes the average of an integer column.
 */
class MyAvgLow extends UserDefinedAggregateFunction{
  // Input schema: a single Int column (e.g. age).
  override def inputSchema: StructType = StructType(Array(StructField("input",IntegerType)))
  // Intermediate buffer: running sum and count. Long is used for both so
  // the sum cannot overflow Int on large groups.
  override def bufferSchema: StructType = new StructType().add("sum",LongType).add("count",LongType)
  // Final result type: the average as a Double.
  override def dataType: DataType = DoubleType
  // Deterministic: identical input always produces identical output.
  override def deterministic: Boolean = true
  // Set the buffer's initial values before aggregation begins.
  override def initialize(buffer: MutableAggregationBuffer): Unit = {
    // Initial sum
    buffer.update(0, 0L)
    // Initial count
    buffer.update(1, 0L)
  }
  // Fold one input row into the buffer (invoked per row, within a partition).
  // Null inputs are skipped — matching Spark's built-in avg — instead of
  // NPE-ing on primitive extraction or being miscounted as 0.
  override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
    if (!input.isNullAt(0)) {
      val age = input.getInt(0)
      // sum = sum + age
      buffer.update(0, buffer.getLong(0) + age)
      // count = count + 1
      buffer.update(1, buffer.getLong(1) + 1L)
    }
  }

  // Merge two partial aggregation buffers (combines per-partition results).
  override def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
    // Accumulated sums from both sides
    buffer1.update(0, buffer1.getLong(0) + buffer2.getLong(0))
    // Accumulated counts from both sides
    buffer1.update(1, buffer1.getLong(1) + buffer2.getLong(1))
  }
  // Produce the final result: sum / count. Returns null for an empty group
  // (instead of NaN from 0.0/0), again matching the built-in avg.
  override def evaluate(buffer: Row): Any = {
    val count = buffer.getLong(1)
    if (count == 0L) null else buffer.getLong(0).toDouble / count
  }
}
