package sql

import org.apache.spark.SparkConf
import org.apache.spark.sql.expressions.{Aggregator, MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types.{DataType, LongType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Dataset, Encoder, Encoders, Row, SparkSession, TypedColumn, functions}

object SparkSql3 {
  // Demonstrates a strongly-typed custom aggregate function (must extend Aggregator; see MyAvgUDAF below).
  /**
   * Entry point: reads data/user.json into a typed Dataset[User] and applies the
   * strongly-typed average-age aggregator to the age column.
   *
   * @param args command-line arguments (unused)
   */
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("sparkSql")
    val spark = SparkSession.builder().config(sparkConf).getOrCreate()
    try {
      val df: DataFrame = spark.read.json("data/user.json")
      df.createOrReplaceTempView("user")
//    spark.udf.register("ageAvg",functions.udaf(new MyAvgUDAF()))
      import spark.implicits._
      val ds: Dataset[User] = df.as[User]
      ds.show()
      // Convert the UDAF into a typed query column. Its IN type is Long, so it
      // must be selected from a Dataset[Long] (the ages), not Dataset[User]:
      // Dataset[User].select would require a TypedColumn[User, _].
      val udafCol: TypedColumn[Long, Long] = new MyAvgUDAF().toColumn
      ds.map(_.age).select(udafCol).show()
    } finally {
      // Always release the local Spark resources, even if the job fails.
      spark.stop()
    }
  }
  /** Row type matching data/user.json: one user's age and name. `final` — case classes should not be subclassed. */
  final case class User(age: Long, username: String)
  /**
   * Strongly-typed custom aggregate function: computes the average age.
   * 1. Extend org.apache.spark.sql.expressions.Aggregator with three type parameters:
   *    IN:  input type (Long)
   *    BUF: intermediate buffer type (Buff)
   *    OUT: output type (Long)
   * 2. Override its six methods: zero, reduce, merge, finish, bufferEncoder, outputEncoder.
   */
  /** Mutable aggregation buffer: running sum (`total`) and element count (`count`). Deliberately uses `var` — Spark's Aggregator contract allows in-place buffer mutation for efficiency. */
  final case class Buff(var total: Long, var count: Long)
  class MyAvgUDAF extends Aggregator[Long, Buff, Long] {
    /** Zero value used to initialise a fresh aggregation buffer. */
    override def zero: Buff = Buff(0L, 0L)

    /** Folds one input value into the buffer (mutated in place, per Aggregator contract). */
    override def reduce(buff: Buff, in: Long): Buff = {
      buff.total += in
      buff.count += 1
      buff
    }

    /** Merges two partial buffers (e.g. produced on different partitions) into the first. */
    override def merge(buff1: Buff, buff2: Buff): Buff = {
      buff1.total += buff2.total
      buff1.count += buff2.count
      buff1
    }

    /**
     * Produces the final average. Guards against division by zero so an
     * empty dataset yields 0 instead of throwing ArithmeticException.
     * Note: integer division — the result is truncated toward zero.
     */
    override def finish(reduction: Buff): Long =
      if (reduction.count == 0L) 0L else reduction.total / reduction.count

    /** Encoder for the intermediate buffer (Buff is a Product, i.e. a case class). */
    override def bufferEncoder: Encoder[Buff] = Encoders.product

    /** Encoder for the Long output value. */
    override def outputEncoder: Encoder[Long] = Encoders.scalaLong
  }

}

