package top.jolyoulu.sql

import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types.{DataType, LongType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @Author: JolyouLu
 * @Date: 2024/5/16 19:21
 * @Description
 */
object Spark01_SparkSQL_UDAF {
  def main(args: Array[String]): Unit = {
    // Set up the SparkSQL runtime environment (local mode, all cores).
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("SparkSQL")
    val spark = SparkSession.builder().config(conf).getOrCreate()
    // Load the JSON resource into a DataFrame and expose it as a temp view.
    // NOTE(review): getResource returns null if the file is missing from the
    // classpath, which would surface here as a NullPointerException.
    val path: String = this.getClass.getClassLoader.getResource("datas/user.json").toURI.getPath
    val df: DataFrame = spark.read.json(path)
    df.createOrReplaceTempView("user")
    // Register the custom aggregate function with Spark under the name "ageAvg".
    spark.udf.register("ageAvg", new MyAvgUDAF)
    // Invoke the user-defined aggregate from SQL.
    spark.sql("select ageAvg(age) from user").show()

    // Shut down the environment.
    spark.close()
  }

  /**
   * Custom aggregate function computing the average age as a Long
   * (integer division — the fractional part is truncated).
   *
   * NOTE(review): UserDefinedAggregateFunction is deprecated since Spark 3.0;
   * consider migrating to org.apache.spark.sql.expressions.Aggregator.
   */
  class MyAvgUDAF extends UserDefinedAggregateFunction {
    // Input schema: one Long column, the age being averaged.
    override def inputSchema: StructType =
      StructType(
        Array(
          StructField("age", LongType)
        )
      )
    // Buffer schema: running sum ("total") and row count ("count").
    override def bufferSchema: StructType =
      StructType(
        Array(
          StructField("total", LongType),
          StructField("count", LongType)
        )
      )
    // Result type of the aggregation.
    override def dataType: DataType = LongType
    // The same input always produces the same output.
    override def deterministic: Boolean = true
    // Zero both buffer slots before aggregation starts.
    override def initialize(buffer: MutableAggregationBuffer): Unit = {
      buffer.update(0, 0L) // total
      buffer.update(1, 0L) // count
    }
    // Fold one input row into the buffer, skipping SQL NULL ages.
    // (The original called input.getLong(0) unconditionally, which throws
    // on a NULL age value.)
    override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
      if (!input.isNullAt(0)) {
        buffer.update(0, buffer.getLong(0) + input.getLong(0)) // accumulate age
        buffer.update(1, buffer.getLong(1) + 1L)               // accumulate count
      }
    }
    // Merge partial buffers produced on different partitions.
    override def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
      buffer1.update(0, buffer1.getLong(0) + buffer2.getLong(0))
      buffer1.update(1, buffer1.getLong(1) + buffer2.getLong(1))
    }
    // Final result: total / count. Return SQL NULL instead of throwing
    // ArithmeticException when zero (non-null) rows were aggregated.
    override def evaluate(buffer: Row): Any = {
      val count = buffer.getLong(1)
      if (count == 0L) null else buffer.getLong(0) / count
    }
  }
}
