package cn.whuc.sparksql

import org.apache.spark.SparkConf
import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.{DataFrame, Encoder, Encoders, SparkSession, functions}

object Demo_UDAF_02 {

  /**
   * Demo entry point: registers a custom average-age UDAF and compares its
   * result against Spark's built-in `avg` on the same data.
   */
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setAppName("sql").setMaster("local[*]")
    val spark: SparkSession = SparkSession.builder().config(sparkConf).getOrCreate()

    // Load the sample users and expose them as a temp view for SQL queries.
    val users: DataFrame = spark.read.json("input/user.json")
    users.createOrReplaceTempView("t1")

    // Wrap the typed Aggregator as an untyped UDAF usable from SQL.
    spark.udf.register("Avgage", functions.udaf(new AvgageAggregator()))

    // Average age via the custom aggregator.
    val query =
      """
        |select
        |     Avgage(age) avgage
        |from
        |     t1
        |""".stripMargin
    spark.sql(query).show()

    // Same computation with the built-in avg, for comparison.
    spark.sql("select avg(age) from t1").show()

    spark.stop()
  }
}

// Mutable aggregation buffer for computing the average age:
// running sum of ages and count of rows seen.
// Fields are `var` because AvgageAggregator mutates the buffer in place
// during reduce/merge.
case class Buffer1(var sum:Long,var count:Long)

// Typed aggregator computing the average of Long inputs as a Double.
// Signature: Aggregator[IN = Long, BUF = Buffer1, OUT = Double].
class AvgageAggregator extends Aggregator[Long,Buffer1,Double] {

  /** Initial buffer: zero sum, zero count. */
  override def zero: Buffer1 = Buffer1(0L, 0L)

  /** Fold one input value into the buffer. */
  override def reduce(b: Buffer1, a: Long): Buffer1 =
    Buffer1(b.sum + a, b.count + 1)

  /** Combine two partial buffers from different partitions. */
  override def merge(b1: Buffer1, b2: Buffer1): Buffer1 =
    Buffer1(b1.sum + b2.sum, b1.count + b2.count)

  /** Produce the final average from the merged buffer. */
  override def finish(reduction: Buffer1): Double = {
    val Buffer1(total, n) = reduction
    total / n.toDouble
  }

  /** Encoder for the buffer type (case class => product encoder). */
  override def bufferEncoder: Encoder[Buffer1] = Encoders.product

  /** Encoder for the Double result. */
  override def outputEncoder: Encoder[Double] = Encoders.scalaDouble
}

