package chapter11

import org.apache.spark.SparkConf
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types.{DataType, DataTypes, DoubleType, LongType, StructField, StructType}

/**
 * author: yuhui
 * description: introductory example of a weakly-typed user-defined aggregate
 *              function (UDAF) that computes the average salary
 * date: 2024-11-29 2:12 PM
 */
/**
 * Weakly-typed user-defined aggregate function (UDAF) computing the average
 * of a Long `salary` column as a Double.
 *
 * NOTE(review): `UserDefinedAggregateFunction` is deprecated since Spark 3.0;
 * prefer `org.apache.spark.sql.expressions.Aggregator` for new code.
 */
class UDAFWeak extends UserDefinedAggregateFunction {
  // Schema of the function's input: a single Long column holding one salary.
  override def inputSchema: StructType = {
    new StructType().add("salary", LongType)
  }

  // Schema of the intermediate aggregation buffer: running sum and row count.
  // (Fixed typo "conut" -> "count"; buffer fields are accessed by index, so
  // the rename does not affect behavior.)
  override def bufferSchema: StructType = {
    new StructType().add("sum", LongType).add("count", LongType)
  }

  // Type of the final result: the average as a Double.
  override def dataType: DataType = DoubleType

  // Same input always produces the same output.
  override def deterministic: Boolean = true

  // Initialize the buffer before aggregation:
  // buffer(0) = running sum, buffer(1) = row count.
  override def initialize(buffer: MutableAggregationBuffer): Unit = {
    buffer(0) = 0L
    buffer(1) = 0L
  }

  // Fold one input row into the buffer: sum += salary, count += 1.
  override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
    // Fix: skip null salaries so they neither break getLong nor skew the
    // average — matches SQL AVG semantics of ignoring NULLs.
    if (!input.isNullAt(0)) {
      buffer(0) = buffer.getLong(0) + input.getLong(0)
      buffer(1) = buffer.getLong(1) + 1
    }
  }

  // Merge partial buffers produced on different partitions/nodes.
  override def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
    buffer1(0) = buffer1.getLong(0) + buffer2.getLong(0)
    buffer1(1) = buffer1.getLong(1) + buffer2.getLong(1)
  }

  // Produce the final result: sum / count.
  // Fix: return null (SQL AVG semantics) instead of NaN when no rows were
  // aggregated, guarding the zero-count case.
  override def evaluate(buffer: Row): Any = {
    val count = buffer.getLong(1)
    if (count == 0L) null else buffer.getLong(0).toDouble / count
  }
}


object UDAFWeak {

  /**
   * Demo entry point: builds a tiny name/salary DataFrame, registers the
   * weakly-typed UDAF as `avgSalary`, and runs it via Spark SQL.
   */
  def main(args: Array[String]): Unit = {
    // Spark configuration: run locally using all available cores.
    val conf = new SparkConf().setAppName("Spark01_Custom").setMaster("local[*]")

    val spark = SparkSession.builder().config(conf).getOrCreate()

    // Implicit conversions (required for RDD -> DataFrame/Dataset via toDF).
    import spark.implicits._

    // Fix: ensure the SparkSession is always stopped — the original never
    // released it, leaking the local Spark runtime.
    try {
      val frame = spark.sparkContext.parallelize(
        List(
          ("余辉", 20000),
          ("视频号：辉哥大数据", 30000),
          ("抖音：辉哥大数据", 40000))
      ).toDF("name", "salary")

      // Register a global temporary view (lives in the `global_temp` database).
      frame.createGlobalTempView("people")
      frame.show()

//    +------------------+------+
//    |              name|salary|
//    +------------------+------+
//    |              余辉| 20000|
//    |视频号：辉哥大数据  | 30000|
//    |  抖音：辉哥大数据 | 40000|
//    +------------------+------+

      // Create and register the aggregate function under the name "avgSalary".
      val udaf = new UDAFWeak
      spark.udf.register("avgSalary", udaf)

      // Global temp views must be qualified with the `global_temp` database.
      spark.sql("select avgSalary(salary) from global_temp.people").show

//    +-----------------+
//    |avgsalary(salary)|
//    +-----------------+
//    |          30000.0|
//    +-----------------+
    } finally {
      spark.stop()
    }
  }

}