package cn.doitedu.day08

import cn.doitedu.utils.IpUtils
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.sql.{Encoder, Encoders, SparkSession}
import org.apache.spark.sql.expressions.Aggregator

object T05_UDAFDemo {

  /**
   * Demo entry point: registers a custom typed Aggregator as a SQL UDAF
   * (`my_avg`) and uses it to compute the average `fv` per `gender`.
   */
  def main(args: Array[String]): Unit = {

    // Local SparkSession using all available cores.
    val spark = SparkSession.builder().appName("UDFDemo1")
      .master("local[*]")
      .getOrCreate()

    try {
      // Read the CSV-formatted file; the first line is treated as the header,
      // so columns (e.g. gender, fv) are addressable by name. Without
      // inferSchema every column is read as a string.
      val df = spark.read
        .option("header", "true")
        .csv("data/boy.txt")

      df.createTempView("v_boy")

      import org.apache.spark.sql.functions._

      // Wrap the typed Aggregator as an untyped UDAF so it is callable from
      // SQL. The aggregator's Double input type makes Spark insert a cast of
      // the string `fv` column to double.
      spark.udf.register("my_avg", udaf(new AvgAggregator))

      spark.sql(
        """
          |select gender, my_avg(fv) avg_fv from v_boy group by gender
          |""".stripMargin)
        .show()
    } finally {
      // Always release driver/executor resources, even if the job fails.
      spark.stop()
    }
  }

}

/**
 * Typed average aggregator over double values.
 *
 * The three type parameters of [[Aggregator]] are:
 *  - IN  = Double         : the type of each input value
 *  - BUF = (Double, Int)  : intermediate state — (running sum, element count)
 *  - OUT = Double         : the final result (the average)
 */
class AvgAggregator extends Aggregator[Double, (Double, Int), Double] {

  /** Neutral starting buffer for every group: zero sum, zero count. */
  override def zero: (Double, Int) = (0.0, 0)

  /** Folds one input value into the buffer (map-side partial aggregation). */
  override def reduce(b: (Double, Int), a: Double): (Double, Int) = {
    val (sum, count) = b
    (sum + a, count + 1)
  }

  /** Combines two partial buffers (reduce-side global aggregation). */
  override def merge(b1: (Double, Int), b2: (Double, Int)): (Double, Int) = {
    val (sum1, count1) = b1
    val (sum2, count2) = b2
    (sum1 + sum2, count1 + count2)
  }

  /** Produces the final average from the fully-merged buffer: sum / count. */
  override def finish(reduction: (Double, Int)): Double = {
    val (sum, count) = reduction
    sum / count
  }

  /** Encoder used to serialize the intermediate (sum, count) buffer. */
  override def bufferEncoder: Encoder[(Double, Int)] = {
    Encoders.tuple(Encoders.scalaDouble, Encoders.scalaInt)
  }

  /** Encoder used to serialize the Double output value. */
  override def outputEncoder: Encoder[Double] = {
    Encoders.scalaDouble
  }
}