package cn.jly.bigdata.spark.sql

import org.apache.spark.sql.{Dataset, Encoder, Encoders, SparkSession, TypedColumn}
import org.apache.spark.SparkConf
import org.apache.spark.sql.expressions.Aggregator

/**
 * @author lanyangji
 * @date 2019/12/1 15:55
 */
/**
 * Demonstrates a strongly-typed (class-based) UDAF applied to a typed Dataset.
 *
 * @author lanyangji
 * @date 2019/12/1 15:55
 */
object SparkSql05_UDAF_Class {

  def main(args: Array[String]): Unit = {

    val conf: SparkConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("SparkSql05_UDAF_Class")
    val session: SparkSession = SparkSession.builder().config(conf).getOrCreate()

    import session.implicits._

    // DataFrame -> typed Dataset[Employee]
    val employees: Dataset[Employee] = session.read.json("input/people.json").as[Employee]
    employees.show()

    // A strongly-typed aggregator cannot be invoked from SQL text;
    // convert it to a TypedColumn and apply it with select().
    val avgAgeCol: TypedColumn[Employee, Double] = new MyAvgClass().toColumn.name("avg_age")
    employees.select(avgAgeCol).show()

    // Release resources.
    session.close()
  }
}

// Input record type: one row of input/people.json.
// NOTE(review): `age` is BigInt, presumably to match the integral type Spark's
// JSON reader infers for numeric columns — confirm against the input schema.
case class Employee(name: String, age: BigInt)

// Intermediate aggregation buffer: running sum of ages and row count.
// Fields are `var` so an Aggregator implementation may update the buffer in place.
case class AvgBuffer(var sum: BigInt, var count: Int)

// Strongly-typed UDAF: computes the average of Employee.age.
// Applied via ds.select(agg.toColumn) — typed Aggregators cannot be called from SQL text.
class MyAvgClass extends Aggregator[Employee, AvgBuffer, Double] {

  // Initial ("zero") buffer for the aggregation: sum = 0, count = 0.
  override def zero: AvgBuffer = AvgBuffer(0, 0)

  // Within-partition step: fold one input row into the buffer.
  // Returns a fresh buffer instead of mutating the input, which is equally
  // valid under the Aggregator contract and avoids shared-state mutation.
  override def reduce(b: AvgBuffer, a: Employee): AvgBuffer =
    AvgBuffer(b.sum + a.age, b.count + 1)

  // Cross-partition step: combine two partial buffers into one.
  override def merge(b1: AvgBuffer, b2: AvgBuffer): AvgBuffer =
    AvgBuffer(b1.sum + b2.sum, b1.count + b2.count)

  // Final result: sum / count.
  // NOTE(review): on empty input (count == 0) this evaluates 0.0 / 0.0 and
  // returns Double.NaN — callers should be aware.
  override def finish(reduction: AvgBuffer): Double =
    reduction.sum.toDouble / reduction.count.toDouble

  // Encoder for the intermediate buffer; Encoders.product handles
  // Scala case classes (and tuples).
  override def bufferEncoder: Encoder[AvgBuffer] = Encoders.product

  // Encoder for the final Double output value.
  override def outputEncoder: Encoder[Double] = Encoders.scalaDouble
}
