package com.atbeijing.bigdata.spark.mytest.sql

import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.SparkConf
import org.apache.spark.sql.{ Encoder, Encoders, SparkSession, functions}

object Spark_UDAF {

  /** Entry point: builds a local SparkSession, runs the typed UDAF over a
    * small in-memory dataset of students, and prints the average age.
    */
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("UDF")
    val ss: SparkSession = SparkSession.builder().config(conf).getOrCreate()
    import ss.implicits._

    try {
      val rdd1 = ss.sparkContext.makeRDD(
        List(
          (1, "zhangsan", 30),
          (2, "lisi", 40),
          (3, "wangwu", 50)
        )
      )

      val df = rdd1.toDF("id", "name", "age")

      // Register a temp view. NOTE(review): the typed query below never reads
      // this view — kept only for parity with the SQL variant of this demo.
      df.createOrReplaceTempView("student")

      val udaf = new MyAgeAvg()

      val ds = df.as[Student]

      // Typed aggregation: the Aggregator becomes a TypedColumn via toColumn.
      ds.select(udaf.toColumn).show
    } finally {
      // Always release the session, even if the job above throws.
      ss.stop()
    }
  }

  /** One input row of the dataset. */
  final case class Student(id: Int, name: String, age: Int)

  /** Mutable accumulation buffer: running sum of ages and row count. */
  final case class AgeBuffer(var total: Int, var count: Int)

  /** Typed UDAF computing the (integer) average age of [[Student]] rows. */
  class MyAgeAvg extends Aggregator[Student, AgeBuffer, Int] {
    /** Identity element for the aggregation. */
    override def zero: AgeBuffer = AgeBuffer(0, 0)

    /** Fold one input row into a partition-local buffer. */
    override def reduce(b: AgeBuffer, a: Student): AgeBuffer = {
      b.total += a.age
      b.count += 1
      b
    }

    /** Combine two partition-local buffers into one. */
    override def merge(b1: AgeBuffer, b2: AgeBuffer): AgeBuffer = {
      b1.total += b2.total
      b1.count += b2.count
      b1
    }

    /** Final result. Guard against an empty dataset: the original code
      * divided unconditionally and would throw ArithmeticException when
      * count == 0. Integer (truncating) division is intentional, since the
      * output type is Int.
      */
    override def finish(reduction: AgeBuffer): Int =
      if (reduction.count == 0) 0 else reduction.total / reduction.count

    /** Encoder for the intermediate buffer (a Scala Product). */
    override def bufferEncoder: Encoder[AgeBuffer] = Encoders.product

    /** Encoder for the Int result. */
    override def outputEncoder: Encoder[Int] = Encoders.scalaInt
  }

}
