package com.bw.sparksql1.job1
import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

/**
  *
  * 自定义：UDAF
  */
/**
  * Custom UDAF that computes the average salary over a `DoubleType` column.
  *
  * Buffer layout: slot 0 = running total (Double), slot 1 = row count (Int).
  * Null salaries are ignored (standard SQL AVG semantics); an empty group
  * yields `null` rather than `NaN`.
  */
object Job12 extends UserDefinedAggregateFunction {

  /** Input schema: a single nullable salary column. */
  override def inputSchema: StructType = StructType(
    StructField("salary", DoubleType, true) :: Nil
  )

  /** Result type: the average salary (nullable Double). */
  override def dataType: DataType = DoubleType

  /**
    * Aggregation buffer schema:
    *   - total: running sum of salaries
    *   - count: number of non-null salaries seen
    */
  override def bufferSchema: StructType = StructType(
    StructField("total", DoubleType, true) ::
    StructField("count", IntegerType, true) ::
    Nil
  )

  /**
    * Produce the final result from a fully-merged buffer.
    * Returns `null` for an empty group — dividing by a zero count would
    * otherwise produce `NaN`, which is not what SQL AVG returns.
    */
  override def evaluate(buffer: Row): Any = {
    val total = buffer.getDouble(0)
    val count = buffer.getInt(1)
    if (count == 0) null else total / count
  }

  /** Initialize the buffer: zero total, zero count. */
  override def initialize(buffer: MutableAggregationBuffer): Unit = {
    buffer.update(0, 0.0)
    buffer.update(1, 0)
  }

  /**
    * Fold one input row into the partial (per-partition) buffer.
    * Null salaries are skipped entirely — calling `getDouble` on a null
    * cell would silently read 0.0 AND bump the count, skewing the average.
    */
  override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
    if (!input.isNullAt(0)) {
      val lastTotal = buffer.getDouble(0)
      val lastCount = buffer.getInt(1)
      val currentSalary = input.getDouble(0)
      buffer.update(0, lastTotal + currentSalary)
      buffer.update(1, lastCount + 1)
    }
  }

  /** Merge two partial buffers (cross-partition combine): sum totals and counts. */
  override def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
    val total1 = buffer1.getDouble(0)
    val count1 = buffer1.getInt(1)
    val total2 = buffer2.getDouble(0)
    val count2 = buffer2.getInt(1)
    buffer1.update(0, total1 + total2)
    buffer1.update(1, count1 + count2)
  }

  /**
    * This function is deterministic: given the same input rows it always
    * produces the same result. (Note: this flag is about repeatability of
    * the output, not about input/output type consistency.)
    */
  override def deterministic: Boolean = true

}