package cn.darksoul3.spark.udf

import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.{DataFrame, Dataset, Encoder, Encoders, SparkSession}

object UDAF {

  /**
   * Custom average aggregator over Int inputs.
   *
   * Buffer is (runningSum, count); the result is the truncated integer
   * average `sum / count`. Returns 0 for an empty group instead of
   * throwing `ArithmeticException` on division by zero (the `zero`
   * buffer starts with count 0).
   */
  private val intAvg: Aggregator[Int, (Long, Int), Long] =
    new Aggregator[Int, (Long, Int), Long] {
      // Neutral element: zero sum, zero count.
      override def zero: (Long, Int) = (0L, 0)

      // Fold one input value into the buffer.
      override def reduce(b: (Long, Int), a: Int): (Long, Int) =
        (b._1 + a, b._2 + 1)

      // Combine two partial buffers (runs across partitions).
      override def merge(b1: (Long, Int), b2: (Long, Int)): (Long, Int) =
        (b1._1 + b2._1, b1._2 + b2._2)

      // Guard: an empty group would otherwise divide by zero.
      override def finish(reduction: (Long, Int)): Long =
        if (reduction._2 == 0) 0L else reduction._1 / reduction._2

      override def bufferEncoder: Encoder[(Long, Int)] =
        Encoders.tuple(Encoders.scalaLong, Encoders.scalaInt)

      override def outputEncoder: Encoder[Long] = Encoders.scalaLong
    }

  /**
   * Reads a tab-separated titles file, registers the custom average
   * UDAF under the name "avg", and prints the average startYear per
   * titleType.
   *
   * @param args optional: args(0) overrides the input file path
   *             (defaults to the original hard-coded Windows path).
   */
  def main(args: Array[String]): Unit = {
    val ss: SparkSession = SparkSession.builder()
      .appName("ReadTsv")
      .master("local[*]")
      .getOrCreate()

    // Allow the input path on the command line; fall back to the
    // original hard-coded location for backward compatibility.
    val path =
      if (args.nonEmpty) args(0)
      else "C:\\Users\\cary2\\Desktop\\title.basics.tsv"

    // NOTE(review): IMDb-style TSVs encode missing values as "\N"; with
    // inferSchema such columns may be read as strings — confirm that
    // startYear actually infers as an integral type before relying on
    // the Int input of the aggregator.
    val titles: DataFrame = ss.read.format("csv")
      .option("inferSchema", "true")
      .option("sep", "\t")
      .option("header", "true")
      .load(path)

    titles.createTempView("v_titles")

    import org.apache.spark.sql.functions._
    // NOTE(review): registering under "avg" shadows Spark SQL's
    // built-in avg() for SQL calls in this session — intentional here
    // for the demo, but rename if both are needed.
    ss.udf.register("avg", udaf(intAvg))

    ss.sql(
      """
        |select titleType,avg(startYear) avg from v_titles group by titleType
        |""".stripMargin).show()

    ss.stop()
  }
}
