package com.haozhen.sql

import org.apache.spark.sql.expressions.Aggregator

/**
  * @author haozhen
  * @email haozh@ync1.com
  * @date 2021/1/31  22:02
  *
  */

/**
  * One raw sales record as read from the source data.
  *
  * `stime` is a date string whose first four characters are the year
  * (e.g. "2019-01-31") — the aggregator buckets rows by that prefix.
  *
  * NOTE(review): `duscount` looks like a typo for `discount`, but renaming
  * the field would break existing encoders/callers, so it is kept as-is.
  */
case class Sales(
    id: Int,
    name1: String,
    sales: Double,
    duscount: Double,
    name2: String,
    stime: String
)

/**
  * Mutable accumulation buffer for the aggregator: running sales totals
  * for 2019 and 2020. Fields are `var` on purpose — the Aggregator's
  * `reduce`/`merge` update the buffer in place.
  */
case class SalesBuffer(
    var sales2019: Double,
    var sales2020: Double
)

/**
  * Type-safe (Dataset API) aggregator: folds [[Sales]] rows into per-year
  * totals and finishes with the relative sales change from 2019 to 2020,
  * i.e. (sales2020 - sales2019) / sales2019, or 0.0 when the 2019 base
  * is (near) zero so we never divide by zero.
  */
object TypeSafeUDAF extends Aggregator[Sales, SalesBuffer, Double] {
  import org.apache.spark.sql.{Encoder, Encoders}

  /** Fresh buffer: nothing accumulated for either year yet. */
  override def zero: SalesBuffer = SalesBuffer(0.0, 0.0)

  /** Fold one input row into the buffer, bucketed by the year prefix of `stime`. */
  override def reduce(b: SalesBuffer, a: Sales): SalesBuffer = {
    val year = a.stime.take(4)
    if (year == "2019") b.sales2019 += a.sales
    else if (year == "2020") b.sales2020 += a.sales
    else println("error") // NOTE(review): rows outside 2019/2020 are only printed, never counted
    b
  }

  /** Combine two partial buffers (cross-partition merge). */
  override def merge(b: SalesBuffer, a: SalesBuffer): SalesBuffer = {
    b.sales2019 += a.sales2019
    b.sales2020 += a.sales2020
    b
  }

  /** Relative growth from 2019 to 2020; 0.0 when the 2019 base is ~0. */
  override def finish(reduction: SalesBuffer): Double = {
    val base = reduction.sales2019
    if (base < 0.00001) 0.0
    else (reduction.sales2020 - base) / base
  }

  // Encoders for the intermediate buffer (product of the case-class fields)
  // and the final Double result.
  override def bufferEncoder: Encoder[SalesBuffer] = Encoders.product

  override def outputEncoder: Encoder[Double] = Encoders.scalaDouble
}
