package com.atguigu.bigdata.spark.sql

import org.apache.spark.SparkConf
import org.apache.spark.sql.expressions.{Aggregator, MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types.{DataType, LongType, StructField, StructType}
import org.apache.spark.sql._

/**
  * create by undeRdoG on  2021-06-19  12:37
  * 凡心所向，素履以往，生如逆旅，一苇以航。
  */
object Spark03_SparkSQL_UDAF1 {

  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("SparkSQL")

    val spark = SparkSession.builder().config(sparkConf).getOrCreate()

    try {
      val df: DataFrame = spark.read.json("datas/user.json")
      df.createOrReplaceTempView("user")

      // Spark 3.x way: wrap the strongly-typed Aggregator into an untyped
      // UDAF usable from SQL via functions.udaf.
      spark.udf.register("AgeAvg", functions.udaf(new MyAvgUDAF))

      spark.sql("select AgeAvg(age) from user").show()
    } finally {
      // Always release the SparkSession — the original leaked it.
      spark.stop()
    }
  }

  /**
    * Mutable aggregation buffer: running sum of ages and number of rows seen.
    * Must be a Product type so Encoders.product can serialize it.
    */
  final case class Buff(var total: Long, var count: Long)

  /**
    * Custom aggregate function computing the (truncated, integer) average age.
    *
    * Extends Aggregator[IN, BUF, OUT] where
    *   IN  = Long — one input age value per row,
    *   BUF = Buff — intermediate (sum, count) state,
    *   OUT = Long — the final average (integer division).
    */
  class MyAvgUDAF extends Aggregator[Long, Buff, Long] {

    // Zero value for the aggregation buffer (must satisfy b merge zero == b).
    override def zero: Buff = Buff(0L, 0L)

    // Fold one input row into the buffer.
    override def reduce(b: Buff, a: Long): Buff = {
      b.total += a
      b.count += 1
      b
    }

    // Combine two partial buffers (e.g. produced on different partitions).
    override def merge(b1: Buff, b2: Buff): Buff = {
      b1.total += b2.total
      b1.count += b2.count
      b1
    }

    // Final result. Guard against division by zero when no rows were
    // aggregated (the original threw ArithmeticException on empty input).
    override def finish(buff: Buff): Long =
      if (buff.count == 0L) 0L else buff.total / buff.count

    // Encoder for the intermediate buffer (case class => product encoder).
    override def bufferEncoder: Encoder[Buff] = Encoders.product

    // Encoder for the output value.
    override def outputEncoder: Encoder[Long] = Encoders.scalaLong
  }

}
