package cn.doitedu.udf

import cn.doitedu.util.SparkUtil
import org.apache.spark.sql.expressions.{Aggregator, UserDefinedAggregateFunction}
import org.apache.spark.sql.{DataFrame, Encoder, Encoders, Row}

/**
 * @Date 22.4.13
 * @Created by HANGGE
 * @Description
 */
object C03_Demo03 {
  /**
   * Demo of type-safe `Aggregator`-based UDAFs in Spark SQL.
   *
   * Loads a JSON file of users, registers two custom aggregate functions
   * (top-two ages per gender as a "max_second" string, and average age)
   * and runs them through SQL GROUP BY queries.
   */
  def main(args: Array[String]): Unit = {
    val session = SparkUtil.getSession
    // `udaf(...)` (wrapping an Aggregator for SQL use) comes from functions._
    import org.apache.spark.sql.functions._

    // Load the input; schema is inferred from the JSON file.
    val df = session.read.json("data\\json\\user.json")

    // Register the type-safe Aggregator as a SQL-callable aggregate function.
    session.udf.register("my_max_second_age", udaf(MyMaxSecondAge))

    df.createOrReplaceTempView("json")

    // Top two ages per gender, formatted as "max_second".
    session.sql(
      """
        |select
        |gender ,
        |my_max_second_age(age)
        |from
        |json
        |group by gender
        |
        |""".stripMargin).show()

    // Second query: reuse the first UDAF and add the average-age UDAF.
    session.udf.register("my_avg", udaf(MyAgeAvg))
    session.sql(
      """
        |select
        |gender ,
        |my_max_second_age(age) ,
        |my_avg(age) as  avg_age
        |from
        |json
        |group by gender
        |
        |""".stripMargin).show()

    df.printSchema()
    df.show()
  }
}

/**
 * Intermediate aggregation buffer for [[MyMaxSecondAge]]: the two largest
 * ages seen so far within a group.
 *
 * @param max    largest age seen so far (0 before any input)
 * @param second second-largest age seen so far (0 before any input)
 */
final case class Buf(max: Int, second: Int)
// Type-safe aggregate function (replaces the deprecated
// UserDefinedAggregateFunction API): for a group of ages, returns the
// largest and second-largest values formatted as "max_second".
// IN = Int (one age per row), BUF = Buf (running top two), OUT = String.
object MyMaxSecondAge extends Aggregator[Int, Buf, String] {
  /**
   * Initial buffer before any input is seen.
   * NOTE(review): 0 as the sentinel assumes ages are non-negative; a group
   * with fewer than two rows reports 0 for the missing slot.
   */
  override def zero: Buf = Buf(0, 0)

  /**
   * Fold one input row into the partition-local buffer:
   * keep the two largest of {current max, current second, new age}.
   */
  override def reduce(b: Buf, age: Int): Buf = {
    // Descending sort via Ordering.reverse; the original sortBy(x => -x)
    // would overflow at Int.MinValue, and was inconsistent with merge.
    val top = List(b.max, b.second, age).sorted(Ordering[Int].reverse)
    Buf(top(0), top(1))
  }

  /**
   * Merge two partition buffers: keep the two largest of the four candidates.
   */
  override def merge(b1: Buf, b2: Buf): Buf = {
    val top = List(b1.max, b1.second, b2.max, b2.second).sorted(Ordering[Int].reverse)
    Buf(top(0), top(1))
  }

  /** Final output: the top two ages joined as "max_second". */
  override def finish(reduction: Buf): String = s"${reduction.max}_${reduction.second}"

  /** Encoder for the intermediate buffer (Buf is a case class, i.e. a Product). */
  override def bufferEncoder: Encoder[Buf] = Encoders.product

  /** Encoder for the final String result. */
  override def outputEncoder: Encoder[String] = Encoders.STRING
}

/**
 * Intermediate aggregation buffer for [[MyAgeAvg]].
 *
 * @param sum   running total of all ages seen in the group
 * @param count number of rows seen in the group
 */
final case class Buf2(sum: Int, count: Int)
// Type-safe average-age aggregator: IN = Int (one age per row),
// BUF = Buf2 (running sum and row count), OUT = Double (sum / count).
object MyAgeAvg extends Aggregator[Int, Buf2, Double] {
  /** Empty buffer: no rows seen, sum and count both zero. */
  override def zero: Buf2 = Buf2(0, 0)

  /** Fold one age into the partition-local running sum and count. */
  override def reduce(b: Buf2, age: Int): Buf2 =
    Buf2(b.sum + age, b.count + 1)

  /** Combine two partition buffers component-wise. */
  override def merge(b1: Buf2, b2: Buf2): Buf2 =
    Buf2(b1.sum + b2.sum, b1.count + b2.count)

  /**
   * Final result: the arithmetic mean of the group's ages.
   * NOTE(review): yields NaN when count is 0, and the Int sum could
   * overflow on very large groups — consider a Long buffer if that matters.
   */
  override def finish(reduction: Buf2): Double =
    reduction.sum.toDouble / reduction.count

  /** Encoder for the case-class buffer. */
  override def bufferEncoder: Encoder[Buf2] = Encoders.product

  /** Encoder for the Double output. */
  override def outputEncoder: Encoder[Double] = Encoders.scalaDouble
}