package com.SparkSQL


import org.apache.spark.SparkConf
import org.apache.spark.sql.expressions.{Aggregator, MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types.{DataType, LongType, StructField, StructType}
import org.apache.spark.sql.{Encoder, Encoders, Row, SparkSession, functions}

import scala.Console.in

/**
 * UDAF  强类型实现自定义函数
 */
/**
 * UDAF example: a strongly-typed custom aggregate function that computes
 * the average age, registered for use from SQL via `functions.udaf`.
 *
 * NOTE: wrapping a typed `Aggregator` for untyped SQL use with
 * `functions.udaf` requires Spark 3.0+ (not available in 2.4).
 */
object Spark03_SparkSQL_UDAF1 {
  def main(args: Array[String]): Unit = {
    // Create the SparkSQL runtime environment.
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("UDF")
    val spark: SparkSession = SparkSession.builder().config(sparkConf).getOrCreate()

    val df = spark.read.json("datas/user.json")
    df.createOrReplaceTempView("user")

    // Register the typed aggregator as a SQL function named "ageAVG".
    // This MUST happen before the query below runs; without it the SQL
    // fails with an "Undefined function: ageAVG" analysis error.
    spark.udf.register("ageAVG", functions.udaf(new MyAvgUDAF()))

    spark.sql("select ageAVG(age)  from user").show

    // Release the session and its resources.
    spark.close()

  }

  /**
   * Mutable aggregation buffer for the average computation.
   *
   * @param total running sum of all age values seen so far
   * @param count number of rows folded into the buffer
   */
  case class Buff(var total: Long, var count: Long)

  /**
   * Custom aggregate function class computing the average age.
   *
   * Extends `org.apache.spark.sql.expressions.Aggregator[IN, BUF, OUT]`:
   *  - IN  : Long — one age value per input row
   *  - BUF : Buff — running total and count
   *  - OUT : Long — truncated integer average
   *
   * Six methods must be overridden: zero, reduce, merge, finish,
   * bufferEncoder, outputEncoder.
   */
  class MyAvgUDAF extends Aggregator[Long, Buff, Long] {

    // Zero value: the initial (empty) aggregation buffer.
    override def zero: Buff = Buff(0L, 0L)

    // Fold one input value into the buffer.
    override def reduce(b: Buff, a: Long): Buff = {
      b.total += a
      b.count += 1
      b
    }

    // Combine two partial buffers (e.g. one per partition).
    override def merge(b1: Buff, b2: Buff): Buff = {
      b1.total += b2.total
      b1.count += b2.count
      b1
    }

    // Final result: integer average (Long division truncates).
    // Guard against division by zero — an aggregation over an empty
    // input calls finish on the zero buffer (count == 0).
    override def finish(buff: Buff): Long =
      if (buff.count == 0) 0L else buff.total / buff.count

    // Encoder for the buffer type (case class -> product encoder).
    override def bufferEncoder: Encoder[Buff] = Encoders.product

    // Encoder for the output type.
    override def outputEncoder: Encoder[Long] = Encoders.scalaLong
  }
}
