package com.king.spark.sql

import org.apache.spark.SparkConf
import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.{DataFrame, Encoder, Encoders, SparkSession, functions}

/**
 * @Author wdl
 * @Date 2022/12/1 16:37
 */
object SparkSQL03_UDAF {

  /**
   * Demonstrates registering and invoking a strongly-typed UDAF
   * (`My_Avg_Aggregator`) through Spark SQL.
   *
   * @param args optional; args(0) may supply the input JSON path.
   *             Falls back to the original hard-coded path so existing
   *             invocations keep working.
   */
  def main(args: Array[String]): Unit = {
    val sparkConf: SparkConf = new SparkConf().setAppName("SparkSQL").setMaster("local[*]")

    val spark: SparkSession =
      SparkSession.builder()
        //.appName("SparkSQL")
        //.master("local[*]")
        .config(sparkConf)
        .getOrCreate()
    // Import implicits up front; conventionally done whether or not they
    // end up being used in this body.
    import spark.implicits._

    // Generalized: the input path can now be passed as the first program
    // argument instead of only the hard-coded default.
    val inputPath: String = args.headOption.getOrElse("E:\\work\\big-data-2020\\spark-api\\input\\user.json")

    val df: DataFrame = spark.read.json(inputPath)
    df.createOrReplaceTempView("user")

    // Register the strongly-typed Aggregator as a SQL-callable UDAF.
    spark.udf.register("my_avg", functions.udaf(new My_Avg_Aggregator))

    spark.sql("select my_avg(age) from user").show()

    spark.stop()

  }
}
/** Mutable aggregation buffer: running sum of ages and running row count. */
case class AgeBuffer(var totalAge: Long, var totalCount: Long)

/**
 * Custom strongly-typed UDAF computing the average age.
 *
 * Implementation checklist:
 * 1. Extend `Aggregator`.
 * 2. Fix the three type parameters:
 *     IN:   Long       (one age value per input row)
 *     BUF:  AgeBuffer  (running sum + count)
 *     OUT:  Double     (the final average)
 */
class My_Avg_Aggregator extends Aggregator[Long, AgeBuffer, Double] {

  /** Initial (empty) buffer: zero sum, zero rows. */
  override def zero: AgeBuffer = AgeBuffer(0L, 0L)

  /** Folds one input age into the buffer by mutating it in place. */
  override def reduce(buffer: AgeBuffer, age: Long): AgeBuffer = {
    buffer.totalAge = buffer.totalAge + age
    buffer.totalCount = buffer.totalCount + 1L
    buffer
  }

  /** Combines two partial buffers (e.g. from different partitions). */
  override def merge(b1: AgeBuffer, b2: AgeBuffer): AgeBuffer = {
    val mergedAge = b1.totalAge + b2.totalAge
    val mergedCount = b1.totalCount + b2.totalCount
    AgeBuffer(mergedAge, mergedCount)
  }

  /** Produces the final average; `.toDouble` forces floating-point division. */
  override def finish(reduction: AgeBuffer): Double =
    reduction.totalAge / reduction.totalCount.toDouble

  /** Encoder for the buffer; `Encoders.product` works for any case class. */
  override def bufferEncoder: Encoder[AgeBuffer] = Encoders.product

  /** Encoder for the Double result. */
  override def outputEncoder: Encoder[Double] = Encoders.scalaDouble
}
