package com.niit.sql

import org.apache.spark.SparkConf
import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.{Encoder, Encoders, SparkSession, functions}

object Spark_SQL_UDAF {

  def main(args: Array[String]): Unit = {
    // Goal: compute the average of the `age` column in input/user.json.
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("SparkSql")
    val spark = SparkSession.builder().config(sparkConf).getOrCreate()

    val df = spark.read.json("input/user.json")
    df.createOrReplaceTempView("user")

    // Register the custom averaging UDAF under the SQL name "ageAvg".
    // functions.udaf wraps the strongly-typed Aggregator so it is callable from SQL.
    spark.udf.register("ageAvg", functions.udaf(new MyAvgUDAF()))

    spark.sql("select ageAvg(age) from user").show()

    spark.close()
  }

  /*
   Custom aggregate function that computes the average age.
   1. Extend org.apache.spark.sql.expressions.Aggregator with type parameters:
      IN:  input type  — Long (the age column)
      BUF: buffer type — Buff (running total and row count)
      OUT: output type — Long (truncated integer average)
   2. Override the six required methods.
   */

  // Mutable aggregation buffer: running sum of ages / number of rows folded in.
  case class Buff(var total: Long, var count: Long)

  class MyAvgUDAF extends Aggregator[Long, Buff, Long] {
    // Zero value: the initial (empty) buffer for a fresh aggregation.
    override def zero: Buff = Buff(0L, 0L)

    // Fold one input value into the buffer (runs per row within a partition).
    override def reduce(buff: Buff, in: Long): Buff = {
      buff.total += in
      buff.count += 1
      buff
    }

    // Combine two partial buffers produced by different partitions.
    override def merge(b1: Buff, b2: Buff): Buff = {
      b1.total += b2.total
      b1.count += b2.count
      b1
    }

    // Produce the final result: integer (truncating) average.
    // Guard against empty input: with zero rows Spark calls finish(zero),
    // and an unguarded total / count would throw ArithmeticException.
    override def finish(buff: Buff): Long =
      if (buff.count == 0L) 0L else buff.total / buff.count

    // Encoder for the intermediate buffer (Buff is a case class => product encoder).
    override def bufferEncoder: Encoder[Buff] = Encoders.product

    // Encoder for the Long output value.
    override def outputEncoder: Encoder[Long] = Encoders.scalaLong
  }

}
