package com.atguigu.sparksql.day01.udf

import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.{Encoder, Encoders}

// Input row type for the average-age aggregation. `age` is a boxed
// java.lang.Long (not a Scala Long) so it can be null for rows with a
// missing age — reduce() below explicitly skips such rows.
case class Person(name: String, age: java.lang.Long)

/**
 * Typed Spark SQL aggregator computing the average age of [[Person]] rows.
 *
 * Input: Person, intermediate buffer: MyBuffer (running sum + count),
 * output: Double. Buffers are mutated in place and returned, which Spark
 * explicitly permits for Aggregator buffers to avoid allocation per row.
 */
object MyAvg_2 extends Aggregator[Person, MyBuffer, Double] {

    /** Fresh, empty buffer: zero sum and zero count. */
    override def zero: MyBuffer = MyBuffer(0, 0)

    /**
     * Folds a single input row into the buffer. A row is counted only when
     * it is non-null AND its age is non-null, so missing ages do not skew
     * the resulting average.
     */
    override def reduce(b: MyBuffer, a: Person): MyBuffer = {
        val hasAge = a != null && a.age != null
        if (hasAge) {
            b.sum += a.age
            b.count += 1
        }
        b
    }

    /** Combines two partial buffers by accumulating the second into the first. */
    override def merge(b1: MyBuffer, b2: MyBuffer): MyBuffer = {
        b1.sum += b2.sum
        b1.count += b2.count
        b1
    }

    /** Final result: delegates to the buffer's own avg computation. */
    // NOTE(review): behavior for count == 0 depends on MyBuffer.avg,
    // which is defined elsewhere — confirm it handles the empty case.
    override def finish(reduction: MyBuffer): Double = reduction.avg

    /** Encoder for the intermediate buffer (MyBuffer is a Product type). */
    override def bufferEncoder: Encoder[MyBuffer] = Encoders.product

    /** Encoder for the primitive Double output. */
    override def outputEncoder: Encoder[Double] = Encoders.scalaDouble
}
