package com.atguigu.day07

import org.apache.spark.sql.{Encoder, Encoders}
import org.apache.spark.sql.expressions.Aggregator

/**
  * 强类型自定义udaf函数
  *     IN: 输入参数类型
  *     BUF: 中间变量类型
  *     OUT: 最终结果类型
  */
/**
  * Mutable accumulation buffer for the average aggregator.
  *
  * @param sum   running total of all values seen so far
  * @param count number of values folded into `sum`
  */
case class AgeBuf(
  var sum: Int,
  var count: Int
)
/**
  * Strongly-typed Spark UDAF that computes the average of `Int` inputs.
  *
  * Type parameters of [[Aggregator]]:
  *   - IN  = Int     (one input value per row)
  *   - BUF = AgeBuf  (mutable running sum/count)
  *   - OUT = Double  (final average)
  */
class MyAvgHight extends Aggregator[Int, AgeBuf, Double] {

  /** Initial (zero) value of the accumulation buffer. */
  override def zero: AgeBuf = AgeBuf(0, 0)

  /**
    * Fold one input value into the buffer (runs per partition).
    * The buffer fields are `var`s, so mutate and return the same instance
    * instead of allocating a new AgeBuf per row, as recommended by the
    * Aggregator API to save object allocations.
    */
  override def reduce(buff: AgeBuf, age: Int): AgeBuf = {
    buff.sum += age
    buff.count += 1
    buff
  }

  /** Combine two partial buffers from different partitions (mutates buff1). */
  override def merge(buff1: AgeBuf, buff2: AgeBuf): AgeBuf = {
    buff1.sum += buff2.sum
    buff1.count += buff2.count
    buff1
  }

  /**
    * Produce the final average from the merged buffer.
    * Guards against an empty group: without the check, 0.0/0 would yield NaN.
    */
  override def finish(reduction: AgeBuf): Double = {
    if (reduction.count == 0) 0.0
    else reduction.sum.toDouble / reduction.count
  }

  /** Encoder for the intermediate buffer type (Product => case class). */
  override def bufferEncoder: Encoder[AgeBuf] = Encoders.product[AgeBuf]

  /** Encoder for the final Double result. */
  override def outputEncoder: Encoder[Double] = Encoders.scalaDouble
}
