package spark.sql

import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.{Encoder, Encoders, SparkSession}

/** One input record as read from the employees JSON file: a name and a salary. */
case class Employee(name: String, salary: Long)
/** Mutable aggregation buffer: running salary total and number of rows folded in. */
case class Average(var sum: Long, var count: Long)

/**
 * Type-safe user-defined aggregate function computing the average salary
 * over a Dataset[Employee].
 *
 * Type parameters of Aggregator:
 *   IN  = Employee — the input row type
 *   BUF = Average  — mutable intermediate buffer (running sum and count)
 *   OUT = Double   — the final result
 */
object MyAverage extends Aggregator[Employee, Average, Double] {

  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .master("local")
      .appName("MyAverage")
      .config("spark.testing.memory", "471859200")
      .getOrCreate()

    // Required: provides the implicit Encoder[Employee] used by .as[Employee]
    // below. Without this import the .as[Employee] call does not compile.
    import spark.implicits._

    // Allow the input path to be supplied on the command line; fall back to
    // the original hard-coded location for backward compatibility.
    val input =
      if (args.nonEmpty) args(0)
      else "C:\\Users\\ljb\\Desktop\\employees.json"

    val ds = spark.read.json(input).as[Employee]
    ds.show()

    // Convert the Aggregator to a TypedColumn so it can be used in select().
    val averageSalary = MyAverage.toColumn.name("average_salary")

    val result = ds.select(averageSalary)
    result.show()

    // Release local Spark resources before the JVM exits.
    spark.stop()
  }

  /** Zero value for the buffer; must satisfy merge(zero, b) == b. */
  override def zero: Average = Average(0L, 0L)

  /** Merge two intermediate buffers; mutates and returns b1 (Spark permits in-place updates). */
  override def merge(b1: Average, b2: Average): Average = {
    b1.sum += b2.sum
    b1.count += b2.count
    b1
  }

  /** Fold one input row into the buffer; mutates and returns the buffer. */
  override def reduce(buffer: Average, employee: Employee): Average = {
    buffer.sum += employee.salary
    buffer.count += 1
    buffer
  }

  /** Final result: mean salary. Yields Double NaN when count is 0 (0.0 / 0). */
  override def finish(reduction: Average): Double =
    reduction.sum.toDouble / reduction.count

  /** Encoder for the intermediate buffer type (case class => Encoders.product). */
  override def bufferEncoder: Encoder[Average] = Encoders.product

  /** Encoder for the final output type. */
  override def outputEncoder: Encoder[Double] = Encoders.scalaDouble
}
