package com.king.spark.sql

import org.apache.spark.SparkConf
import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.{DataFrame, Dataset, Encoder, Encoders, SparkSession, TypedColumn}

/**
 * @author wdl
 * @since 2022/12/1 22:04
 */
object SparkSQL04_UDAF {
  /**
   * Entry point: builds a local SparkSession, loads user records from a JSON
   * file, and computes the average age with the strongly-typed custom
   * aggregator [[My_Avg_Aggregator1]].
   *
   * @param args optional; args(0) overrides the default input path, keeping
   *             the original hard-coded sample file as a fallback
   */
  def main(args: Array[String]): Unit = {
    // SparkSession is the unified entry point for Spark SQL.
    val sparkConf: SparkConf = new SparkConf().setAppName("SparkSQL").setMaster("local[*]")

    val spark: SparkSession =
      SparkSession.builder()
        .config(sparkConf)
        .getOrCreate()

    // Required for the DataFrame -> Dataset[User] conversion (.as[User]) below.
    import spark.implicits._

    // Allow the input path to be supplied on the command line; fall back to
    // the original sample file so existing invocations keep working.
    val inputPath: String =
      if (args.nonEmpty) args(0)
      else "E:\\work\\big-data-2020\\spark-api\\input\\user.json"

    val df: DataFrame = spark.read.json(inputPath)
    val ds: Dataset[User] = df.as[User]

    // Wrap the typed aggregator as a TypedColumn usable in a typed select.
    val avgAgeColumn: TypedColumn[User, Double] = (new My_Avg_Aggregator1).toColumn

    ds.select(avgAgeColumn).show()

    spark.stop()
  }
}

/**
 * Strongly-typed aggregator computing the average age over a Dataset[User].
 *
 * Type parameters of [[Aggregator]]:
 *  - IN:  User      — one input row
 *  - BUF: AgeBuffer — running total of ages and row count (mutable fields)
 *  - OUT: Double    — the average age
 */
class My_Avg_Aggregator1 extends Aggregator[User, AgeBuffer, Double]{
  /** Neutral starting buffer: zero total age, zero rows seen. */
  override def zero: AgeBuffer = AgeBuffer(0L, 0L)

  /** Folds one input row into the buffer (mutates and returns the buffer). */
  override def reduce(buf: AgeBuffer, user: User): AgeBuffer = {
    buf.totalAge += user.age.toLong
    buf.totalCount += 1
    buf
  }

  /** Combines two partial buffers produced on different partitions. */
  override def merge(left: AgeBuffer, right: AgeBuffer): AgeBuffer = {
    left.totalAge += right.totalAge
    left.totalCount += right.totalCount
    left
  }

  /**
   * Produces the final average. Guards the empty-dataset case: the unguarded
   * `totalAge / totalCount.toDouble` yields NaN when no rows were aggregated,
   * so return 0.0 instead of propagating a confusing NaN.
   */
  override def finish(buffer: AgeBuffer): Double =
    if (buffer.totalCount == 0L) 0.0
    else buffer.totalAge.toDouble / buffer.totalCount

  /** Encoder for the intermediate buffer (AgeBuffer is a Product/case class). */
  override def bufferEncoder: Encoder[AgeBuffer] = Encoders.product

  /** Encoder for the primitive Double result. */
  override def outputEncoder: Encoder[Double] = Encoders.scalaDouble
}