package com.study.spark.scala.udaf

import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types._

/**
 * Standard recipe for using UserDefinedAggregateFunction:
 *
 * 1. Define a class extending UserDefinedAggregateFunction and implement each lifecycle method.
 *
 * 2. Register the UDAF with Spark, binding it to a name.
 *
 * 3. Call it from SQL statements via the name bound above.
 *
 * @author stephen
 * @date 2019-09-27 10:09
 */
object UserDefinedAggregateFunctionDemo {

  def main(args: Array[String]): Unit = {
    // Local Spark session for this demo run.
    val session = SparkSession.builder()
      .master("local[*]")
      .appName("UserDefinedAggregateFunctionDemo")
      .getOrCreate()

    import session.implicits._
    import org.apache.spark.sql.functions._

    // One-column DataFrame holding the integers 0..9, exposed as a temp view.
    val numbers = Range(0, 10).toSeq.toDF("num")
    numbers.createOrReplaceTempView("tmp")

    // Register the custom average UDAF under the name "u_avg".
    val averageUdaf = new AverageUserDefinedAggregateFunction
    session.udf.register("u_avg", averageUdaf)

    // Invoke the UDAF through SQL...
    session.sql("select count(1) as cnt, u_avg(num) as avg from tmp").show()
    // ...and through the DataFrame API (the UDAF instance is callable as a column expression).
    numbers.select(count($"num").as("cnt"), averageUdaf($"num").as("avg")).show()

    session.stop()
  }
}

/**
 * Computes the arithmetic mean of a column.
 */
class AverageUserDefinedAggregateFunction extends UserDefinedAggregateFunction {
  // 1. Schema of the input argument(s) this UDAF accepts.
  override def inputSchema: StructType =
    StructType(StructField("input", LongType) :: Nil)

  // 2. Schema of the intermediate aggregation buffer: running sum and row count.
  override def bufferSchema: StructType =
    StructType(StructField("sum", LongType) :: StructField("count", LongType) :: Nil)

  // 3. Type of the final result.
  override def dataType: DataType = DoubleType

  // 4. Idempotent: the same input always produces the same output.
  override def deterministic: Boolean = true

  // 5. Start every fresh buffer at sum = 0, count = 0.
  override def initialize(buffer: MutableAggregationBuffer): Unit = {
    buffer(0) = 0L
    buffer(1) = 0L
  }

  // 6. Fold one input row of a partition into the buffer. NULL inputs are
  //    skipped, matching SQL aggregate semantics.
  override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
    if (!input.isNullAt(0)) {
      buffer(0) = buffer.getLong(0) + input.getLong(0)
      buffer(1) = buffer.getLong(1) + 1L
    }
  }

  // 7. Combine the partial results of two buffers (cross-partition merge).
  override def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
    buffer1(0) = buffer1.getLong(0) + buffer2.getLong(0)
    buffer1(1) = buffer1.getLong(1) + buffer2.getLong(1)
  }

  // 8. Produce the final average. Return SQL NULL — not NaN from 0.0/0 —
  //    when no non-null values were aggregated, consistent with the
  //    built-in `avg` aggregate.
  override def evaluate(buffer: Row): Any = {
    val count = buffer.getLong(1)
    if (count == 0L) null else buffer.getLong(0).toDouble / count
  }
}
