package com.bigdata.spark.sql

import com.bigdata.spark.sql.Spark03_SparkSQL_UDAF2.Buff
import org.apache.spark.SparkConf
import org.apache.spark.sql.expressions.{Aggregator, MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Encoder, Encoders, Row, SparkSession, functions}

object SparkSQL_Avg_Age_UDAF_4 {

    def main(args: Array[String]): Unit = {

        // TODO Compute the average age with a custom typed UDAF.
        val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("sparkSQL")
        val spark: SparkSession = SparkSession.builder().config(sparkConf).getOrCreate()

        try {
            // Load the user data and expose it as a SQL view.
            val df: DataFrame = spark.read.json("datas/user.json")
            df.createOrReplaceTempView("user")

            // Wrap the typed Aggregator so it can be called from untyped SQL.
            spark.udf.register("ageAvg", functions.udaf(new MyAveragUDAF1()))

            spark.sql("select ageAvg(age) from user").show

        } finally {
            // Always release the SparkSession, even when the job fails,
            // so local resources (UI port, temp dirs, executors) are freed.
            spark.close()
        }
    }


    /**
     * 自定义聚合函数类：计算年龄的平均值
     * 1. 继承 org.apache.spark.sql.expressions.Aggregator, 定义泛型
     * IN : 输入的数据类型 Long
     * BUF : 缓冲区的数据类型 Buffer
     *   - total : 年龄的总和
     *   - count : 记录的数量
     * OUT : 输出的数据类型 Long
     * 2. 重写方法 (共 6 个)
     */
    case class Buffer(var total: Long, var count: Long)

    class MyAveragUDAF1 extends Aggregator[Long, Buffer, Long] {

        // Initial (zero) value for the aggregation buffer.
        override def zero: Buffer = {
            Buffer(0L, 0L)
        }

        // Fold one input value into the buffer.
        override def reduce(buff: Buffer, in: Long): Buffer = {
            buff.total = buff.total + in
            buff.count += 1
            buff
        }

        // Merge two partial buffers (produced on different partitions).
        override def merge(b1: Buffer, b2: Buffer): Buffer = {
            b1.total = b1.total + b2.total
            b1.count = b1.count + b2.count
            b1
        }

        // Produce the final result from the buffer.
        // Guard against count == 0 (no rows aggregated, e.g. an empty relation
        // or an all-null column) to avoid an ArithmeticException; return 0 in
        // that case. Note: Long division truncates the average toward zero.
        override def finish(buff: Buffer): Long = {
            if (buff.count == 0L) 0L else buff.total / buff.count
        }

        // Encoder for the intermediate buffer (a Product/case class).
        override def bufferEncoder: Encoder[Buffer] = Encoders.product

        // Encoder for the output value.
        override def outputEncoder: Encoder[Long] = Encoders.scalaLong
    }
}
