package com.atguigu.sparksql.day01.udf

import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.{Encoder, Encoders}

/**
 * Mutable aggregation buffer for the average computation.
 *
 * @param sum   running total of all accumulated values
 * @param count number of values accumulated so far
 */
case class MyBuffer(var sum: Long, var count: Long) {
    /** Average of the accumulated values. NOTE: returns NaN when count == 0 (empty buffer). */
    def avg: Double = sum.toDouble / count
}

/**
 * Typed UDAF computing the average of `Long` input values.
 *
 * Input: Long, Buffer: MyBuffer(sum, count), Output: Double.
 * NOTE(review): `reduce` skips inputs equal to 0, so zeros are excluded from
 * the average — presumably 0 encodes "missing"; confirm against callers.
 */
class MyAvg extends Aggregator[Long, MyBuffer, Double] {

    /** Fresh, empty buffer: no values seen yet. */
    override def zero: MyBuffer = MyBuffer(0L, 0L)

    /** Within-partition aggregation: fold one input value into the buffer. */
    override def reduce(b: MyBuffer, a: Long): MyBuffer =
        if (a != 0) MyBuffer(b.sum + a, b.count + 1) else b

    /** Cross-partition aggregation: combine two partial buffers. */
    override def merge(b1: MyBuffer, b2: MyBuffer): MyBuffer =
        MyBuffer(b1.sum + b2.sum, b1.count + b2.count)

    /** Final result: the average (NaN when no values were accumulated). */
    override def finish(reduction: MyBuffer): Double = reduction.avg

    /** Encoder for the intermediate buffer (case classes use the product encoder). */
    override def bufferEncoder: Encoder[MyBuffer] = Encoders.product[MyBuffer]

    /** Encoder for the primitive Double output. */
    override def outputEncoder: Encoder[Double] = Encoders.scalaDouble
}
