package sparkSQL.study

import org.apache.spark.SparkConf
import org.apache.spark.sql.{Encoder, Encoders, SparkSession, functions}
import org.apache.spark.sql.expressions.Aggregator

object Spark01_SparkSQL_Basic3 {

    def main(args: Array[String]): Unit = {

        // Run Spark locally on all available cores.
        val conf  = new SparkConf().setMaster("local[*]").setAppName("SparkSQL_UDF...")
        val spark = SparkSession.builder().config(conf).getOrCreate()

        // Load the JSON data and expose it to SQL as the "user" view.
        // NOTE(review): backslash path is Windows-specific — consider a portable
        // relative path if this needs to run on other platforms.
        val users = spark.read.json("src\\main\\scala\\data\\user.json")
        users.createOrReplaceTempView("user")

        // Register the strongly-typed Aggregator as an untyped UDAF so it can
        // be invoked from SQL text, then query the average age.
        spark.udf.register("aveAvg", functions.udaf(new MyAvgUDAF))
        spark.sql("select aveAvg(age) from user").show()

        spark.stop()
    }

    /**
     * Custom aggregate function: computes the average age.
     * Defined by extending org.apache.spark.sql.expressions.Aggregator
     * and overriding its methods.
     */
    // Mutable aggregation buffer: running sum of ages (`total`) and row count (`count`).
    // `var` fields allow the Aggregator callbacks to update the buffer in place.
    case class Buff(var total: Long, var count: Long)
    /**
     * Typed aggregate function computing the average age.
     *
     * Type parameters of [[Aggregator]]:
     *   IN  = Long — one age value per input row,
     *   BUF = Buff — running sum and count,
     *   OUT = Long — truncated integer average.
     */
    class MyAvgUDAF extends Aggregator[Long, Buff, Long]{

        /** Zero value: the initial (empty) buffer — sum 0, count 0. */
        override def zero: Buff = {
            Buff(0L, 0L)
        }

        /** Folds one input value into the buffer. */
        override def reduce(b: Buff, a: Long): Buff = {
            b.total = b.total + a
            b.count = b.count + 1
            b
        }

        /** Merges two partial buffers (e.g. from different partitions). */
        override def merge(b1: Buff, b2: Buff): Buff = {
            b1.total = b1.total + b2.total
            b1.count = b1.count + b2.count
            b1
        }

        /**
         * Produces the final result from the fully merged buffer.
         *
         * Guards against an empty input set (count == 0), which would
         * otherwise throw ArithmeticException on division by zero;
         * returns 0 in that case. Note: integer division, so the
         * average is truncated toward zero.
         */
        override def finish(reduction: Buff): Long = {
            if (reduction.count == 0L) 0L
            else reduction.total / reduction.count
        }

        /** Encoder for the intermediate buffer (case-class / product encoder). */
        override def bufferEncoder: Encoder[Buff] = Encoders.product

        /** Encoder for the final Long output value. */
        override def outputEncoder: Encoder[Long] = Encoders.scalaLong

    }

}
