package com.doit.day07

import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.{Encoder, Encoders}

/**
 * @Author: Hang.Nian.YY
 * @WX: 17710299606
 * @Tips: 学大数据 ,到多易教育
 * @DOC: https://blog.csdn.net/qq_37933018?spm=1000.2115.3001.5343
 * @Description:
 * max: computes the maximum age
 * Type param 1 (IN):  input value per row — age, Int
 * Type param 2 (BUF): intermediate aggregation buffer — Long
 * Type param 3 (OUT): final output result — Int
 */


/**
 * Typed UDAF computing the maximum age.
 *
 * Type parameters of [[Aggregator]]:
 *  - IN  (Int):  one input age per row
 *  - BUF (Long): running maximum carried between rows / partitions
 *  - OUT (Int):  final maximum age
 *
 * NOTE: an Aggregator must be stateless. Spark serializes the instance to
 * executors and threads the buffer value through zero/reduce/merge
 * explicitly, so all state lives in the buffer parameters — never in
 * instance fields (the previous `var buffer` was removed for this reason).
 */
class MyUdaf02 extends Aggregator[Int, Long, Int] {

  /** Initial buffer value; 0 is a valid identity as ages are non-negative. */
  override def zero: Long = 0L

  /** Fold one input age into the running maximum within a partition. */
  override def reduce(b: Long, a: Int): Long = math.max(b, a.toLong)

  /**
   * Combine the partial maxima of two partitions.
   * BUG FIX: was `???`, which threw NotImplementedError whenever Spark
   * merged partition-level results (i.e. on any multi-partition dataset).
   */
  override def merge(b1: Long, b2: Long): Long = math.max(b1, b2)

  /** Convert the final buffer value to the output type. */
  override def finish(reduction: Long): Int = reduction.toInt

  /** Encoder for the intermediate buffer type. */
  override def bufferEncoder: Encoder[Long] = Encoders.scalaLong

  /** Encoder for the output type. */
  override def outputEncoder: Encoder[Int] = Encoders.scalaInt
}
