package com.study.spark.scala.udaf

import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.{Encoder, Encoders, SparkSession}


/**
 * Aggregator是类型安全的UDAF
 *
 * @author stephen
 * @date 2019-09-27 10:30
 */
object AggregatorDemo {

  def main(args: Array[String]): Unit = {
    // Local Spark session for the demo, using all available cores.
    val spark = SparkSession.builder()
      .master("local[*]")
      // Fixed: was "UserDefinedAggregateFunctionDemo", a copy-paste leftover from
      // another demo; the app name now matches this object.
      .appName("AggregatorDemo")
      .getOrCreate()

    import spark.implicits._

    // Single Int column "num" holding 0..9. The `.as[Long]` below relies on
    // Spark's implicit Int -> Long upcast for single-column Datasets.
    val df = Range(0, 10).toSeq.toDF("num")
    // NOTE(review): this temp view is never queried in this demo; kept for
    // backward parity, but consider removing it or adding a SQL example — confirm intent.
    df.createOrReplaceTempView("tmp")

    // Convert the Aggregator into a `TypedColumn` so it can be applied to a typed
    // Dataset. Expected output: one row with avg = 4.5.
    df.as[Long].select(AverageAggregator.toColumn.name("avg")).show()

    spark.stop()
  }
}

/**
 * Type-safe UDAF ([[Aggregator]]) that computes the arithmetic mean.
 *
 * Type parameters are, in order: input value type, intermediate buffer type,
 * final output type.
 */
object AverageAggregator extends Aggregator[Long, Average, Double] {

  /** Fold `deltaSum` and `deltaCount` into `acc` in place and return it. */
  private def accumulate(acc: Average, deltaSum: Long, deltaCount: Long): Average = {
    acc.sum += deltaSum
    acc.count += deltaCount
    acc
  }

  /** 1. A fresh, empty aggregation buffer. */
  override def zero: Average = Average(sum = 0L, count = 0L)

  /** 2. Fold a single new input value into the buffer. */
  override def reduce(b: Average, a: Long): Average = accumulate(b, a, 1L)

  /** 3. Combine two partial buffers (e.g. produced by different partitions). */
  override def merge(b1: Average, b2: Average): Average = accumulate(b1, b2.sum, b2.count)

  /** 4. Final result: sum / count (an empty buffer yields 0/0 = NaN, as before). */
  override def finish(reduction: Average): Double = reduction.sum.toDouble / reduction.count

  /** 5. Encoder for the intermediate buffer type (a case class, hence `product`). */
  override def bufferEncoder: Encoder[Average] = Encoders.product

  /** 6. Encoder for the final `Double` output. */
  override def outputEncoder: Encoder[Double] = Encoders.scalaDouble
}

/**
 * Mutable buffer used while computing the average.
 *
 * @param sum   running total of all values accumulated so far
 * @param count number of values folded into `sum`
 */
case class Average(var sum: Long, var count: Long)