package sparksqls.customizedfunction

import org.apache.spark.sql.{Encoder, Encoders}
import org.apache.spark.sql.expressions.Aggregator

/**
 * calculate average age of listed people
 * this class is similar to scala combineByKey()
 */
/**
 * Typed Spark SQL aggregator that computes the mean age across a dataset
 * of [[Person2]] rows. Conceptually similar to combineByKey(): the
 * intermediate [[AgeBuffer]] carries a running (sum, count) pair and
 * finish() derives the average from it.
 */
class CustomizedAverage extends Aggregator[Person2, AgeBuffer, Double] {

  /** Neutral starting buffer: no ages accumulated yet. */
  override def zero: AgeBuffer = AgeBuffer(0L, 0L)

  /** Fold one input row into the running buffer (within a partition). */
  override def reduce(b: AgeBuffer, a: Person2): AgeBuffer =
    AgeBuffer(b.sum + a.age, b.count + 1L)

  /** Combine partial buffers produced by different partitions / tasks. */
  override def merge(b1: AgeBuffer, b2: AgeBuffer): AgeBuffer =
    AgeBuffer(b1.sum + b2.sum, b1.count + b2.count)

  /**
   * Final reduction: the mean age.
   * NOTE(review): a zero-count buffer (empty input) yields NaN here, the
   * same as the original sum/count division — confirm callers expect that.
   */
  override def finish(reduction: AgeBuffer): Double =
    reduction.sum.toDouble / reduction.count

  /** Encoder for the intermediate buffer — presumably AgeBuffer is a case
   *  class, so the generic Product encoder applies. */
  override def bufferEncoder: Encoder[AgeBuffer] = Encoders.product

  /** Encoder for the Double result; spelled out explicitly because the
   *  usual spark.implicits are not in scope inside this class. */
  override def outputEncoder: Encoder[Double] = Encoders.scalaDouble
}
