package cn.whuc.sparksql

import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.{DataFrame, Encoder, Encoders, SparkSession, functions}

object Demo_UDAF_01 {
  /**
   * Entry point: builds a three-column DataFrame from a CSV-like text file,
   * registers a custom Aggregator-based average UDAF, and runs it side by side
   * with the built-in AVG for comparison.
   */
  def main(args: Array[String]): Unit = {
    val sparkConf: SparkConf = new SparkConf().setAppName("sql").setMaster("local[*]")
    val spark: SparkSession = SparkSession.builder().config(sparkConf).getOrCreate()

    import spark.implicits._

    // Each input line has the shape "name,course,score".
    val lines: RDD[String] = spark.sparkContext.textFile("input/Data01.txt")
    val scores: DataFrame = lines
      .map { line =>
        val fields: Array[String] = line.split(",")
        (fields(0), fields(1), fields(2))
      }
      .toDF("name", "course", "score")

    scores.createTempView("t1")

    // Expose the type-safe Aggregator to SQL under the name "AvgScore".
    spark.udf.register("AvgScore",functions.udaf(new AvgAggregator()))

    // Custom UDAF vs. built-in AVG: the per-course averages should agree.
    spark.sql("select course,AvgScore(score) AvgScore from t1 group by course").show()
    spark.sql("SELECT course, AVG(score) AS average_score FROM t1 GROUP BY course").show()

    // Top 10 students ranked by their average score across courses.
    spark.sql("SELECT name, AVG(score) AS average_score FROM t1 GROUP BY name ORDER BY average_score DESC").show(10)

    spark.stop()
  }
}

// Accumulation buffer for the UDAF: running sum of scores and the row count.
// Fields are vars so the aggregator's reduce/merge steps may update them in place.
case class Buffer(var sum:Long,var count:Long)

/**
 * Type-safe average aggregator: IN = score (Long), BUF = running (sum, count),
 * OUT = arithmetic mean (Double). Registered via functions.udaf so it is
 * callable from SQL.
 */
class AvgAggregator extends Aggregator[Long,Buffer,Double] {

  /** Neutral element: an empty running total. */
  override def zero: Buffer = Buffer(0L, 0L)

  /** Folds a single input score into the per-partition buffer. */
  override def reduce(b: Buffer, a: Long): Buffer =
    Buffer(b.sum + a, b.count + 1L)

  /** Combines the partial results of two partitions. */
  override def merge(b1: Buffer, b2: Buffer): Buffer =
    Buffer(b1.sum + b2.sum, b1.count + b2.count)

  /** Final projection: mean of all scores seen (sum / count). */
  override def finish(reduction: Buffer): Double =
    reduction.sum.toDouble / reduction.count

  /** Buffer is a case class (a Product), so the generic product encoder applies. */
  override def bufferEncoder: Encoder[Buffer] = Encoders.product

  /** The result is a plain Scala Double. */
  override def outputEncoder: Encoder[Double] = Encoders.scalaDouble

}
