package day08

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Encoder, Encoders, Row, SparkSession, TypedColumn}
import org.apache.spark.sql.expressions.{Aggregator, MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types.{DataType, DoubleType, IntegerType, StructField, StructType}
import org.apache.spark.util.AccumulatorV2
import org.apache.spark.{SparkConf, SparkContext}

/**
 * 2.7	用户自定义函数
 *
 * 2.7.1	UDF
 * 输入一行，返回一个结果。在Shell窗口中可以通过spark.udf功能用户可以自定义函数。
 *
 * 1)	创建DataFrame
 * scala> val df = spark.read.json("/opt/module/spark-local/people.json")
 * df: org.apache.spark.sql.DataFrame = [age: bigint, name: string]
 *
 * 2)	打印数据
 * scala> df.show
 * +---+--------+
 * |age|    name|
 * +---+--------+
 * | 18|qiaofeng|
 * | 19|  duanyu|
 * | 20|   xuzhu|
 * +---+--------+
 *
 * 3)	注册UDF，功能为在数据前添加字符串
 * scala> spark.udf.register("addName",(x:String)=> "Name:"+x)
 * res9: org.apache.spark.sql.expressions.UserDefinedFunction = UserDefinedFunction(<function1>,StringType,Some(List(StringType)))
 *
 * 4)	创建临时表
 * scala> df.createOrReplaceTempView("people")
 *
 * 5)	应用UDF
 * scala> spark.sql("Select addName(name),age from people").show()
 * +-----------------+---+
 * |UDF:addName(name)|age|
 * +-----------------+---+
 * |    Name:qiaofeng| 18|
 * |      Name:duanyu| 19|
 * |       Name:xuzhu| 20|
 * +-----------------+---+
 *
 * 2.7.2	UDAF
 * 输入多行,返回一行。强类型的Dataset和弱类型的DataFrame都提供了相关的聚合函数，
 * 如 count()，countDistinct()，avg()，max()，min()。除此之外，用户可以设定自己
 * 的自定义聚合函数。通过继承UserDefinedAggregateFunction来实现用户自定义聚合函数。
 *
 * 需求：实现求平均年龄
 *
 * 一、RDD算子方式实现
 */
object Spark_SQL_4 {
  def main(args: Array[String]): Unit = {
    // 1. Spark configuration: app name plus local mode using every available core.
    val sparkConf: SparkConf = new SparkConf().setAppName("SparkCoreTest").setMaster("local[*]")

    // 2. SparkContext is the entry point for submitting a Spark application.
    val sparkContext: SparkContext = new SparkContext(sparkConf)

    val people: RDD[(String, Int)] = sparkContext.makeRDD(List(("zhangsan", 20), ("lisi", 30), ("wangwu", 40)))

    // Map every (name, age) record to (1, age), then reduce once to get
    // (record count, age total) in a single pass over the data.
    val (totalCount, totalAge) = people
      .map { case (_, age) => (1, age) }
      .reduce((left, right) => (left._1 + right._1, left._2 + right._2))

    // Average age = sum of ages / number of records.
    val averageAge: Double = totalAge.toDouble / totalCount
    println(averageAge)

    // Release cluster resources.
    sparkContext.stop()
  }
}

/**
 * 二、自定义累加器实现（减少Shuffle）提高效率（模仿LongAccumulator累加器）
 */
object Spark_SQL_4_1 {
  def main(args: Array[String]): Unit = {
    // 1. Spark configuration: app name plus local mode using every available core.
    val sparkConf: SparkConf = new SparkConf().setAppName("SparkCoreTest").setMaster("local[*]")

    // 2. SparkContext is the entry point for submitting a Spark application.
    val sparkContext: SparkContext = new SparkContext(sparkConf)

    val people: RDD[(String, Int)] = sparkContext.makeRDD(List(("zhangsan", 20), ("lisi", 30), ("wangwu", 40)))

    val avgAcc: AvgAccumulator = new AvgAccumulator

    // 3. Register the accumulator with the driver so partial results are merged back.
    sparkContext.register(avgAcc)

    // 4. Fold every record of every partition into the accumulator on the executors.
    people.foreachPartition(_.foreach(avgAcc.add))

    // 5. Read the merged result on the driver.
    println(avgAcc.value)

    // 6. Release cluster resources.
    sparkContext.stop()
  }
}

/**
 * Accumulator that computes the average age from (name, age) records.
 *
 * Modelled on Spark's built-in LongAccumulator: it keeps a running sum and a
 * record count, so the average can be computed on the driver without an extra
 * shuffle stage.
 */
class AvgAccumulator extends AccumulatorV2[(String, Int), Double] {

  // Running total of all ages folded in so far.
  var sum: Int = 0

  // Number of records folded into `sum`.
  var count: Int = 0

  /** Zero state: nothing has been accumulated yet. */
  override def isZero: Boolean = sum == 0 && count == 0

  /** Returns a new accumulator carrying this one's current partial state. */
  override def copy(): AccumulatorV2[(String, Int), Double] = {
    val copied: AvgAccumulator = new AvgAccumulator
    copied.sum = sum
    copied.count = count
    copied
  }

  /** Resets this accumulator back to its zero state. */
  override def reset(): Unit = {
    sum = 0
    count = 0
  }

  /** Folds one (name, age) record into the partial state. */
  override def add(v: (String, Int)): Unit = {
    sum = sum + v._2
    count = count + 1
  }

  /**
   * Merges another task's partial state into this accumulator.
   *
   * Fix: the original silently ignored accumulators of a different concrete
   * type (`case _ =>` with an empty body), which would drop data without any
   * warning. Spark's built-in accumulators throw UnsupportedOperationException
   * on a type mismatch, so we do the same here.
   */
  override def merge(other: AccumulatorV2[(String, Int), Double]): Unit = {
    other match {
      case o: AvgAccumulator =>
        sum = sum + o.sum
        count = count + o.count
      case _ =>
        throw new UnsupportedOperationException(
          s"Cannot merge ${this.getClass.getName} with ${other.getClass.getName}")
    }
  }

  /** Average of all accumulated ages; NaN when no records have been added. */
  override def value: Double = sum.toDouble / count
}

/**
 * 三、自定义聚合函数实现-弱类型（应用于SparkSQL更方便）
 */
object Spark_SQL_4_2 {
  def main(args: Array[String]): Unit = {

    // Spark configuration: local mode with every available core.
    val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("Spark_SQL")

    // SparkSession is the SQL entry point.
    val session: SparkSession = SparkSession.builder().config(sparkConf).getOrCreate()

    // Instantiate the untyped (weakly-typed) aggregate function.
    val avgUdaf: MyAverageUDAF = new MyAverageUDAF

    // Register it under the name used in the SQL text below.
    session.udf.register("avgAge", avgUdaf)

    // Load the input data.
    val users: DataFrame = session.read.json("input/user.json")

    // Expose the DataFrame to SQL as a temporary view.
    users.createOrReplaceTempView("user")

    // Invoke the custom aggregate through SQL and display the result.
    val result: DataFrame = session.sql("select avgAge(age) from user")
    result.show()

    // Release resources.
    session.stop()

  }
}

/**
 * 用户自定义聚合函数求平均值
 */
/**
 * Weakly-typed user-defined aggregate function that averages an integer
 * `age` column. Buffer layout: slot 0 = running sum, slot 1 = record count.
 */
class MyAverageUDAF extends UserDefinedAggregateFunction {

  /**
   * Schema of the aggregate's input: a single integer `age` column.
   *
   * @return StructType
   */
  override def inputSchema: StructType =
    StructType(StructField("age", IntegerType) :: Nil)

  /**
   * Schema of the aggregation buffer: (sum, count), both integers.
   *
   * @return StructType
   */
  override def bufferSchema: StructType =
    StructType(StructField("sum", IntegerType) :: StructField("count", IntegerType) :: Nil)

  /**
   * Type of the final result produced by evaluate().
   *
   * @return DataType
   */
  override def dataType: DataType = DoubleType

  /**
   * True: the same input always produces the same output.
   *
   * @return
   */
  override def deterministic: Boolean = true

  /**
   * Initializes the buffer to its zero value: no ages seen, zero sum.
   *
   * @param buffer <br>
   */
  override def initialize(buffer: MutableAggregationBuffer): Unit = {
    // slot 0: running sum of ages
    buffer.update(0, 0)
    // slot 1: number of ages accumulated
    buffer.update(1, 0)
  }

  /**
   * Folds one input row into the buffer; null ages are skipped.
   *
   * @param buffer <br>
   * @param input  <br>
   */
  override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
    if (!input.isNullAt(0)) {
      buffer.update(0, buffer.getInt(0) + input.getInt(0))
      buffer.update(1, buffer.getInt(1) + 1)
    }
  }

  /**
   * Combines two partial buffers, storing the merged state back into buffer1.
   *
   * @param buffer1 <br>
   * @param buffer2 <br>
   */
  override def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
    buffer1.update(0, buffer1.getInt(0) + buffer2.getInt(0))
    buffer1.update(1, buffer1.getInt(1) + buffer2.getInt(1))
  }

  /**
   * Produces the final average: sum / count.
   *
   * @param buffer <br>
   * @return
   */
  override def evaluate(buffer: Row): Double =
    buffer.getInt(0).toDouble / buffer.getInt(1)
}

/**
 * 三、自定义聚合函数实现-强类型（应用于DataSet的DSL更方便）
 */
object Spark_SQL_4_4 {
  def main(args: Array[String]): Unit = {

    // Spark configuration: local mode with every available core.
    val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("SparkSQL")

    // SparkSession is the SQL entry point.
    val session: SparkSession = SparkSession.builder().config(sparkConf).getOrCreate()

    // Load the input data.
    val users: DataFrame = session.read.json("input/user.json")

    // Convert the untyped DataFrame into a typed Dataset[User01].
    import session.implicits._
    val typedUsers: Dataset[User01] = users.as[User01]

    // Instantiate the strongly-typed aggregator.
    val avgAggregator: MyAverageUDAF1 = new MyAverageUDAF1

    // Lift the aggregator into a column usable from the Dataset DSL.
    val avgColumn: TypedColumn[User01, Double] = avgAggregator.toColumn

    // Run the aggregation.
    val averages: Dataset[Double] = typedUsers.select(avgColumn)

    // Display the result.
    averages.show()

    // Release resources.
    session.stop()
  }
}

/**
 * 2.7.3	UDTF
 * 输入一行，返回多行(hive)；
 * SparkSQL中没有UDTF，spark中用flatMap即可实现该功能
 *
 */
object Spark_SQL_4_5 {
  // Intentionally empty: UDTFs (one row in, many rows out) are a Hive concept.
  // Spark SQL has no UDTF API — the same effect is achieved with flatMap.
}

/**
 * 用户定义的聚合函数求平均值-强类型
 *
 * 定义类继承org.apache.spark.sql.expressions.Aggregator
 * 重写类中的方法
 */
/**
 * Strongly-typed user-defined aggregate that averages User01.age.
 *
 * Extends org.apache.spark.sql.expressions.Aggregator and overrides its
 * lifecycle methods; the AgeBuffer is mutated in place during reduce/merge.
 */
class MyAverageUDAF1 extends Aggregator[User01, AgeBuffer, Double] {

  /** Initial (zero) buffer: no ages accumulated yet. */
  override def zero: AgeBuffer = AgeBuffer(0L, 0L)

  /** Folds one input record into the buffer, mutating it in place. */
  override def reduce(buffer: AgeBuffer, input: User01): AgeBuffer = {
    buffer.sum += input.age
    buffer.count += 1
    buffer
  }

  /** Combines two partial buffers; the merged state is stored in the first. */
  override def merge(left: AgeBuffer, right: AgeBuffer): AgeBuffer = {
    left.sum += right.sum
    left.count += right.count
    left
  }

  /** Produces the final average: sum of ages / number of records. */
  override def finish(buffer: AgeBuffer): Double =
    buffer.sum.toDouble / buffer.count

  /**
   * Encoder used to serialize the buffer between stages; Encoders.product
   * is the standard choice for a case-class buffer.
   *
   * @return
   */
  override def bufferEncoder: Encoder[AgeBuffer] = Encoders.product

  /** Encoder for the Double result. */
  override def outputEncoder: Encoder[Double] = Encoders.scalaDouble
}

/**
 * Input record type for the strongly-typed aggregator.
 *
 * @param username user name read from the JSON source
 * @param age      age in years (bigint in JSON, hence Long)
 */
case class User01(username: String, age: Long)

/**
 * Mutable aggregation buffer for the strongly-typed aggregator.
 *
 * @param sum   running total of all ages seen so far
 * @param count number of records folded into `sum`
 */
case class AgeBuffer(var sum: Long, var count: Long)