package day02

import org.apache.spark.SparkConf
import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.{DataFrame, Encoder, Encoders, SparkSession, functions}

/**
 * @author wsl
 * @version 2022-10-12
 *          聚合
 */
/**
 * Demo entry point: registers a custom typed-Aggregator-based UDAF ("myavg")
 * and compares its result against Spark's built-in avg() on the same data.
 */
object UDAF {
  def main(args: Array[String]): Unit = {
    // Run locally, using as many worker threads as there are cores.
    val sparkConf = new SparkConf().setAppName("spark sql").setMaster("local[*]")
    val session: SparkSession = SparkSession.builder().config(sparkConf).getOrCreate()

    // Load the sample user records and expose them to SQL as the "user" view.
    val users: DataFrame = session.read.json("sparksql/input/user.json")
    users.createOrReplaceTempView("user")

    // Wrap the typed Aggregator so it is callable from SQL by name.
    session.udf.register("myavg", functions.udaf(new MyAvgUDAF))

    // Average age computed twice: built-in avg vs. the custom UDAF.
    session.sql("select avg(age) from user").show()
    session.sql("select myavg(age) from user").show()

    session.stop()
  }
}

// Mutable aggregation buffer holding a running sum of ages and a row count,
// e.g. sum (18+19+20) divided by count (1+1+1). Fields are vars so the
// Aggregator can update the buffer in place during reduce/merge.
case class Buff(var sum:Long,var cnt:Long)

/**
 * Typed Aggregator that averages Long inputs into a Double.
 * The intermediate state is a (sum, count) pair carried in [[Buff]].
 */
class MyAvgUDAF extends Aggregator[Long, Buff, Double] {
  /** Initial (empty) buffer for a fresh aggregation. */
  override def zero: Buff = Buff(0L, 0L)

  /** Fold a single input value into the partition-local buffer. */
  override def reduce(buff: Buff, age: Long): Buff =
    Buff(buff.sum + age, buff.cnt + 1L)

  /** Combine partial buffers coming from different partitions. */
  override def merge(b1: Buff, b2: Buff): Buff =
    Buff(b1.sum + b2.sum, b1.cnt + b2.cnt)

  /** Final result: sum / count (NaN when no rows were aggregated, since 0.0/0.0 is NaN). */
  override def finish(buff: Buff): Double =
    buff.sum.toDouble / buff.cnt

  // Encoders tell Spark SQL how to (de)serialize the intermediate buffer
  // and the final output. Encoders.product covers any case class.
  override def bufferEncoder: Encoder[Buff] = Encoders.product

  override def outputEncoder: Encoder[Double] = Encoders.scalaDouble
}
