package com.yanggu.spark.sql

import org.apache.spark.sql.{Encoder, Encoders}
import org.apache.spark.sql.expressions.Aggregator


/** Input row for the average aggregation: one user's age (the value averaged) and username. */
final case class User01(age: Long, username: String)
/**
 * Mutable accumulation buffer for the average: running sum of ages and count of rows.
 * Fields are `var` deliberately so the aggregator can update the buffer in place
 * instead of allocating a new instance per input row.
 */
final case class AverageBuffer(var sum: Long, var count: Long)
/**
 * Type-safe user-defined average aggregator.
 *
 * Extends [[org.apache.spark.sql.expressions.Aggregator]]`[IN, BUF, OUT]`
 * with IN = User01 (input rows), BUF = AverageBuffer (intermediate state),
 * OUT = Double (the average age), overriding all required members.
 */
class MyAverageUDAF2 extends Aggregator[User01, AverageBuffer, Double] {

  /** Zero element of the aggregation: an empty buffer (sum = 0, count = 0). */
  override def zero: AverageBuffer = AverageBuffer(0L, 0L)

  /**
   * Folds a single input row into the buffer.
   *
   * Mutates and returns the same buffer instance — in-place update is the
   * conventional Aggregator pattern, avoiding one allocation per row.
   *
   * @param buffer running accumulation state
   * @param row    incoming input row
   * @return the updated buffer
   */
  override def reduce(buffer: AverageBuffer, row: User01): AverageBuffer = {
    buffer.sum += row.age
    buffer.count += 1L
    buffer
  }

  /** Combines two partial buffers (e.g. from different partitions) into the first. */
  override def merge(b1: AverageBuffer, b2: AverageBuffer): AverageBuffer = {
    b1.sum += b2.sum
    b1.count += b2.count
    b1
  }

  /**
   * Produces the final average from the completed buffer.
   * NOTE(review): yields NaN when count is 0 (empty input) — confirm callers accept that.
   */
  override def finish(reduction: AverageBuffer): Double =
    reduction.sum.toDouble / reduction.count

  // Encoders that (de)serialize the buffer and output between Spark stages:
  // Encoders.product covers any case class; use the matching primitive encoder otherwise.
  override def bufferEncoder: Encoder[AverageBuffer] = Encoders.product

  override def outputEncoder: Encoder[Double] = Encoders.scalaDouble
}
