package sparksqls.customizedfunction

import org.apache.spark.sql.{Dataset, Encoder, Encoders, SparkSession}
import org.apache.spark.SparkConf


/** Demo entry point: applies a custom typed aggregator ([[CustomizedAverage]],
  * defined elsewhere in this package) to a `Dataset[Person2]` via `toColumn`.
  *
  * Runs Spark locally (`local[*]`) and prints the average age of the sample
  * records to stdout.
  */
object CustomizedAggregator {

  private val conf: SparkConf = new SparkConf()
    .setMaster("local[*]")
    .setAppName("sparkCustomizedFunction")
  private val spark: SparkSession = SparkSession.builder()
    .config(conf)
    .getOrCreate()
  // Sample (name, age) records used to build the demo Dataset.
  private val array: Array[(String, Int)] =
    Array(("Zhangsan", 19), ("Lisi", 21), ("Hanmeimei", 23))

  // Brings in the encoders/implicits needed by .toDF / .as[Person2].
  import spark.implicits._

  def main(args: Array[String]): Unit = {
    try {
      // Build a typed Dataset[Person2] from the raw (name, age) tuples.
      // Person bean structure: name-String, age-Int.
      val dataset: Dataset[Person2] = spark.sparkContext.parallelize(array)
        .map(f => Person2(f._1.trim, f._2))
        .toDF("name", "age")
        .as[Person2]
      // Apply the customized average aggregator as a named typed column.
      val average = new CustomizedAverage
      val col = average.toColumn.name("average age")
      dataset.select(col).show()
    } finally {
      // Always release the SparkSession (and its SparkContext), even if the
      // job above fails — the original leaked the session on exit.
      spark.stop()
    }
  }
}

// Row bean for the demo Dataset: a person's name and age.
// NOTE(review): fields are `var`, which is non-idiomatic for a case class —
// presumably kept mutable for bean-style access by Spark's encoder or other
// callers; confirm no caller mutates these before switching to `val`.
case class Person2(var name: String, var age: Int)

// Intermediate aggregation buffer (running sum of ages and record count).
// NOTE(review): the `var` fields are likely mutated in-place by the
// reduce/merge steps of `CustomizedAverage` (defined elsewhere) — verify
// before making them immutable `val`s.
case class AgeBuffer(var sum: Long, var count: Long)

