package cn.whuc.sparksql

import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.{DataFrame, Encoder, Encoders, SparkSession, functions}

object Demo_readFile {
  /**
   * Demo entry point: loads a space-delimited grade file into a DataFrame,
   * registers a custom averaging UDAF, and compares per-subject averages
   * with the combined-score average, both grouped by gender.
   */
  def main(args: Array[String]): Unit = {
    // Local SparkSession on all cores. App name was previously empty ("");
    // a real name makes the job identifiable in the Spark UI / history server.
    val spark: SparkSession = SparkSession
      .builder()
      .config(new SparkConf().setMaster("local[*]").setAppName("Demo_readFile"))
      .getOrCreate()

    // NOTE(review): removed an unused `textFile("input/primary_midsemester.txt")`
    // RDD that was never referenced (textFile is lazy, so nothing was read).

    // First line is the header; inferSchema makes the score columns numeric
    // (required for avg()); fields are separated by a single space.
    val df: DataFrame = spark.read
      .option("header", "true")
      .option("inferSchema", "true")
      .option("delimiter", " ")
      .csv("input/primary_midsemester.csv")

    df.createOrReplaceTempView("t1")

    // Wrap the type-safe Aggregator so it is callable from SQL as `avgScore`.
    spark.udf.register("avgScore", functions.udaf(new AvgScoreUDAF()))

    // Per-subject averages vs. average combined (Chinese+English+Math) score.
    spark.sql("select gender,avg(Chinese),avg(English),avg(Math) from t1 group by gender").show()
    spark.sql("select gender,avgScore(Chinese,English,Math) avgScore from t1 group by gender").show()

    spark.stop()
  }
}

case class AvgScpreBuffer(var sum:Long,var count:Long)

/**
 * Type-safe UDAF averaging the combined score of three subjects per group.
 *
 * Input is one (Chinese, English, Math) tuple per row; the result is
 * (sum of all three subjects over all rows) / (row count), i.e. the average
 * total score per student — not a per-subject average.
 */
class AvgScoreUDAF extends Aggregator[(Long, Long, Long), AvgScpreBuffer, Double] {

  /** Empty accumulator: no score mass and no rows seen yet. */
  override def zero: AvgScpreBuffer = AvgScpreBuffer(sum = 0L, count = 0L)

  /** Fold one row's three subject scores into the buffer. */
  override def reduce(b: AvgScpreBuffer, a: (Long, Long, Long)): AvgScpreBuffer = {
    val (chinese, english, math) = a
    AvgScpreBuffer(b.sum + chinese + english + math, b.count + 1L)
  }

  /** Combine two partial accumulators produced on different partitions. */
  override def merge(b1: AvgScpreBuffer, b2: AvgScpreBuffer): AvgScpreBuffer =
    AvgScpreBuffer(b1.sum + b2.sum, b1.count + b2.count)

  /**
   * Final result: total score divided by row count.
   * NOTE(review): yields NaN/Infinity for an empty group (count == 0) —
   * presumably unreachable here since groups always have rows; confirm.
   */
  override def finish(reduction: AvgScpreBuffer): Double =
    reduction.sum.toDouble / reduction.count

  /** Intermediate buffer is a product type (case class). */
  override def bufferEncoder: Encoder[AvgScpreBuffer] = Encoders.product

  /** Output is a primitive Double. */
  override def outputEncoder: Encoder[Double] = Encoders.scalaDouble
}