package dataframe

import org.apache.spark.SparkConf
import org.apache.spark.sql.{Dataset, Encoder, Encoders, SparkSession}
import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.functions._

/**
 * Demonstrates a strongly-typed UDAF via Spark's `Aggregator` API:
 * reads employee records from JSON and computes average salary,
 * both globally and grouped by employee name.
 */
object DataFrame_UDAFTest {

  def main(args: Array[String]): Unit = {
    // Local-mode configuration for the demo.
    val sparkConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("DataFrame_UDAFTest")

    val spark: SparkSession = SparkSession
      .builder()
      .config(sparkConf)
      .getOrCreate()
    import spark.implicits._

    // Load the JSON records and bind them to the typed Dataset API.
    val employees: Dataset[Employee] = spark
      .read
      .json("data/employees.json")
      .as[Employee]

    employees.show()

    // Average salary over the entire Dataset.
    employees.select(new MyAvg().toColumn.name("AVG")).show()

    // Average salary per employee name, via the typed groupByKey/agg API.
    employees
      .groupByKey(_.name)
      .agg(new MyAvg().toColumn.name("T"))
      .show()

    spark.stop()
  }

  /**
   * Typed aggregator computing the mean salary.
   * Input rows: [[Employee]]; intermediate buffer: [[Average]]
   * (running sum and count); final output: Double mean.
   */
  class MyAvg extends Aggregator[Employee, Average, Double] {

    /** Neutral element for the aggregation: merging any buffer b with zero must yield b. */
    override def zero: Average = Average(0L, 0L)

    /**
     * Folds one input row into the buffer. Mutating and returning the
     * existing buffer (rather than allocating a new one) is allowed by the
     * Aggregator contract and avoids per-row allocation.
     */
    override def reduce(buffer: Average, emp: Employee): Average = {
      buffer.employee_salarySum += emp.salary
      buffer.employee_count += 1L
      buffer
    }

    /** Combines two partial buffers (e.g. from different partitions) into one. */
    override def merge(left: Average, right: Average): Average = {
      left.employee_salarySum += right.employee_salarySum
      left.employee_count += right.employee_count
      left
    }

    /** Converts the final buffer into the mean salary (Double division). */
    override def finish(buffer: Average): Double =
      buffer.employee_salarySum.toDouble / buffer.employee_count

    /** Encoder for the intermediate buffer type (a Product, i.e. case class). */
    override def bufferEncoder: Encoder[Average] = Encoders.product

    /** Encoder for the final Double result. */
    override def outputEncoder: Encoder[Double] = Encoders.scalaDouble
  }

  /** One employee record; field names must match the JSON columns. */
  case class Employee(name: String, salary: Long)

  /** Mutable aggregation buffer: running salary sum and row count. */
  case class Average(var employee_salarySum: Long, var employee_count: Long)
}
