package day07

import org.apache.spark.sql.{DataFrame, SparkSession}

object UDAFDemo {
  /**
    * Demonstrates implementing word count with a Spark SQL UDAF.
    *
    * Word count can be implemented many ways (plain Scala, actors, RDD operators,
    * accumulators, Spark SQL); here a user-defined aggregate function counts the
    * occurrences of each name in
    * Array("dazhao","yadong","dazhao","yadong","xiaodong","dazhao").
    */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession.builder().appName("UDAFDemo").master("local[2]").getOrCreate()
    try {
      // Build the sample data by hand, wrap each value in a case class, and convert to a DataFrame.
      val names = Array("dazhao", "yadong", "dazhao", "yadong", "xiaodong", "dazhao")
      val namesDF: DataFrame = spark.createDataFrame(names.map(PersonName2)) // eta-expanded: same as x => PersonName2(x)
      // Register the DataFrame as a temporary view so it can be queried with SQL.
      namesDF.createOrReplaceTempView("t_person")
      // Register the UDAF just like a UDF via spark.udf.register(); PersonWCUDAF
      // (defined elsewhere) extends UserDefinedAggregateFunction and implements its callbacks.
      spark.udf.register("wc", new PersonWCUDAF)
      // Run the aggregation: one row per distinct name with its occurrence count.
      spark.sql("select name, wc(name) from t_person group by name").show()
    } finally {
      spark.stop() // always release the local Spark context, even if the query fails
    }
  }
}
/** Immutable wrapper for a single person name, used to build a one-column DataFrame. */
final case class PersonName2(name: String)
