package com.larry.spark.sql

import org.apache.spark.SparkConf
import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.{Dataset, Encoder, Encoders, SparkSession, TypedColumn, expressions}

object Sql_Oper_UDF_2 {

  /**
   * Demonstrates a strongly-typed aggregate function ([[MyAvgUDAF1]]):
   * reads users from a JSON file, converts the DataFrame to a typed
   * Dataset, and computes the average age via `Aggregator.toColumn`.
   */
  def main(args: Array[String]): Unit = {
    // Run Spark locally on all available cores.
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("sql")

    // Build (or reuse) the SparkSession for this application.
    val session: SparkSession = SparkSession.builder().config(sparkConf).getOrCreate()

    import session.implicits._

    // Load the source data; each JSON record carries "username" and "age".
    val userDf = session.read.json("input/user.json")

    // Registers the DataFrame for SQL access.
    // NOTE(review): the "user" view is never queried below — kept for parity.
    userDf.createOrReplaceTempView("user")

    // Switch to the typed API so the Aggregator can be applied as a TypedColumn.
    val userDs: Dataset[User01] = userDf.as[User01]
    val avgAggregator = new MyAvgUDAF1
    val avgAge: TypedColumn[User01, Double] = avgAggregator.toColumn

    // Compute and print the average age of all users.
    userDs.select(avgAge).show()

    // Release Spark resources.
    session.stop()
  }
}
/**
 * Typed aggregate function computing the average of the `age` field of
 * [[User01]] rows.
 *
 * Type parameters of [[Aggregator]]: input = User01,
 * intermediate buffer = AgeBuffer (running sum + count), output = Double.
 */
class MyAvgUDAF1 extends Aggregator[User01, AgeBuffer, Double] {

  /** Neutral starting buffer: no ages seen yet. */
  override def zero: AgeBuffer = AgeBuffer(0L, 0L)

  /** Folds one input row into the running buffer. */
  override def reduce(buffer: AgeBuffer, user: User01): AgeBuffer =
    AgeBuffer(buffer.sum + user.age, buffer.count + 1)

  /** Combines two partial buffers produced on different partitions. */
  override def merge(left: AgeBuffer, right: AgeBuffer): AgeBuffer =
    AgeBuffer(left.sum + right.sum, left.count + right.count)

  /** Final result: total age / row count (NaN when no rows were aggregated). */
  override def finish(reduction: AgeBuffer): Double =
    reduction.sum.toDouble / reduction.count

  /** Encoder for the intermediate buffer (AgeBuffer is a Product/case class). */
  override def bufferEncoder: Encoder[AgeBuffer] = Encoders.product

  /** Encoder for the Double result. */
  override def outputEncoder: Encoder[Double] = Encoders.scalaDouble
}

// Input row type matching the JSON records read from input/user.json.
case class User01(username:String,age:Long)
// Intermediate aggregation buffer: running sum of ages and row count.
// Fields are vars so the Aggregator may update the buffer in place.
case class AgeBuffer(var sum:Long,var count:Long)
