package org.shj.spark.udaf

import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.Encoders
import org.apache.spark.sql.Encoder
import org.apache.spark.sql.SparkSession
import org.shj.spark.util.Util

// Input row type: one employee record as read from employees.json.
case class Employee(name: String, salary: Long)
// Mutable aggregation buffer: running salary sum and row count.
// Fields are vars so the Aggregator can update the buffer in place
// instead of allocating a new object per row.
case class Average(var sum: Long, var count: Long)

/**
 * Demonstrates a type-safe user-defined aggregate function (UDAF):
 * an [[Aggregator]] that computes the average salary over a
 * Dataset[Employee], converted to a typed Column via `toColumn`.
 */
object TypeSafeUDAF extends Aggregator[Employee, Average, Double]{
  // The "zero" value of this aggregation; must satisfy b merge zero == b.
  // Case-class companion apply is used instead of redundant `new`.
  def zero: Average = Average(0L, 0L)

  // Fold one input row into the running buffer. The buffer is mutated
  // in place rather than copied, for efficiency (Spark permits this).
  def reduce(buffer: Average, employee: Employee): Average = {
    buffer.sum += employee.salary
    buffer.count += 1
    buffer
  }

  // Merge two intermediate buffers produced by different partitions.
  // b1 is mutated in place and returned.
  def merge(b1: Average, b2: Average): Average = {
    b1.sum += b2.sum
    b1.count += b2.count
    b1
  }

  // Produce the final result from the merged buffer.
  // NOTE(review): yields Double.NaN when count == 0 (empty input Dataset).
  def finish(reduction: Average): Double = reduction.sum.toDouble / reduction.count

  // Encoder for the intermediate buffer type (a Product/case class).
  def bufferEncoder: Encoder[Average] = Encoders.product

  // Encoder for the final result type.
  def outputEncoder: Encoder[Double] = Encoders.scalaDouble


  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("TypeSafeUDAF").master("local").getOrCreate()
    spark.sparkContext.setLogLevel("WARN")

    // Needed for the .as[Employee] typed conversion below.
    import spark.implicits._

    val ds = spark.read.json(Util.fullPath("employees.json")).as[Employee]
    ds.show()

    // Convert the Aggregator into a typed Column and apply it to the Dataset.
    val avgSalary = TypeSafeUDAF.toColumn.name("avg_salary")
    ds.select(avgSalary).show()

    spark.stop()
  }
}