package sparkSQL.study

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Dataset, Encoder, Encoders, SparkSession, functions}
import org.apache.spark.sql.expressions.Aggregator

/**
 * Demonstrates a type-safe Spark SQL UDAF built with the [[Aggregator]] API
 * and registered as an untyped SQL function via `functions.udaf` (the
 * Spark 3.x replacement for the deprecated `UserDefinedAggregateFunction`).
 *
 * Computes the average `age` over a small in-memory `user` view.
 */
object UDAF_test {
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setMaster("local[*]").setAppName("udafTest")

    // A SparkSession owns its SparkContext, so we build the session and take
    // the context from it. (The original code did `new SparkContext(conf)`
    // first, which only worked because getOrCreate() happened to reuse the
    // already-active context — a fragile, redundant double entry point.)
    val spark = SparkSession.builder().config(conf).getOrCreate()
    import spark.implicits._

    val data: RDD[(String, Int)] =
      spark.sparkContext.makeRDD(List(("zhangsan", 20), ("lisi", 30), ("wangw", 40)))

    // Lift the raw tuples into a typed Dataset so the temp view exposes
    // proper column names ("name", "age").
    val dataDS: Dataset[User] = data.map { case (name, age) => User(name, age) }.toDS()

    dataDS.createOrReplaceTempView("user")

    // Register the Aggregator under the SQL-callable name "ageAvg".
    spark.udf.register("ageAvg", functions.udaf(new MyAgeAvg))

    spark.sql(
      """
        |select ageAvg(age) from user
        |""".stripMargin).show()

    spark.close()
  }

  /** One input row: a user's name and age. */
  case class User(name: String, age: Int)

  /**
   * Mutable aggregation buffer: running sum of ages and row count.
   * `var` fields are deliberate — Spark's Aggregator contract allows (and the
   * common pattern encourages) in-place buffer mutation for efficiency.
   */
  case class Buff(var sum: Long, var cnt: Long)

  /**
   * Average-age aggregator.
   *
   * IN = `Long` — the `age` column (an `Int`) is widened to `Long` by Spark
   * SQL's implicit cast at call time; BUF = [[Buff]]; OUT = `Double`.
   */
  case class MyAgeAvg() extends Aggregator[Long, Buff, Double] {

    /** The identity (empty) buffer: zero sum, zero rows seen. */
    override def zero: Buff = Buff(0L, 0L)

    /** Folds one input value into the buffer (mutated in place, then returned). */
    override def reduce(b: Buff, a: Long): Buff = {
      b.sum += a
      b.cnt += 1
      b
    }

    /** Combines two partial buffers, e.g. results from different partitions. */
    override def merge(b1: Buff, b2: Buff): Buff = {
      b1.sum += b2.sum
      b1.cnt += b2.cnt
      b1
    }

    /**
     * Produces the final average. The empty-input case is made explicit:
     * it yields NaN, which is also what `0.0 / 0` evaluates to, so behavior
     * is unchanged — the guard only documents the contract.
     */
    override def finish(reduction: Buff): Double =
      if (reduction.cnt == 0L) Double.NaN
      else reduction.sum.toDouble / reduction.cnt

    /** Encoder for the intermediate buffer (Buff is a Product/case class). */
    override def bufferEncoder: Encoder[Buff] = Encoders.product

    /** Encoder for the final Double result. */
    override def outputEncoder: Encoder[Double] = Encoders.scalaDouble
  }
}
