package sparkSql.dataframe

import org.apache.spark.sql._
import org.apache.spark.sql.expressions.{Aggregator, MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types.{DataType, DoubleType, LongType, StructType}


/**
  * Demonstrates registering and using a plain (scalar) UDF in Spark SQL.
  */
object UDF {
	
	import org.apache.spark.SparkConf
	import org.apache.spark.sql.SparkSession
	import org.apache.spark.sql.DataFrame
	
	def main(args: Array[String]): Unit = {
		val sparkConf = new SparkConf().setAppName("udf").setMaster("local[*]")
		val session: SparkSession = SparkSession.builder().config(sparkConf).getOrCreate()
		
		// Step 1: define the UDF and register it under the name "addName".
		session.udf.register("addName", (x: String) => "Name" + x)
		
		val frame: DataFrame = session.read.json("testData/input/word/test.json")
		frame.createTempView("student")
		session.sql("select name, age from student").show()
		
		// Step 2: invoke the registered UDF from a SQL query.
		session.sql("select addName(name), age from student").show()
		
		session.stop()
	}
}

/**
  * UDAF (untyped API): computes the average of the "age" column.
  *
  * Keeps a running (sum, count) pair in the aggregation buffer and divides at
  * the end.
  *
  * NOTE(review): UserDefinedAggregateFunction is deprecated since Spark 3.0 in
  * favour of Aggregator + functions.udaf — worth migrating if this project
  * upgrades.
  */
class MyAvgFunction extends UserDefinedAggregateFunction {
	
	// Schema of the input passed to the function: a single Long column.
	override def inputSchema: StructType = {
		new StructType().add("age", LongType)
	}
	
	// Schema of the intermediate aggregation buffer: running sum and row count.
	override def bufferSchema: StructType = {
		new StructType().add("sum", LongType).add("count", LongType)
	}
	
	// Result type of the aggregation.
	override def dataType: DataType = DoubleType
	
	// The same input always produces the same output.
	override def deterministic: Boolean = true
	
	// Initialize the buffer before aggregation starts.
	override def initialize(buffer: MutableAggregationBuffer): Unit = {
		buffer(0) = 0L // sum
		buffer(1) = 0L // count
	}
	
	// Fold one input row into the buffer.
	// Fix: skip null ages — the original read input.getLong(0) unconditionally,
	// which treats a null as 0 and still bumps the count, skewing the average.
	override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
		if (!input.isNullAt(0)) {
			buffer(0) = buffer.getLong(0) + input.getLong(0)
			buffer(1) = buffer.getLong(1) + 1
		}
	}
	
	// Merge partial buffers produced on different executors.
	override def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
		// sum
		buffer1(0) = buffer1.getLong(0) + buffer2.getLong(0)
		// count
		buffer1(1) = buffer1.getLong(1) + buffer2.getLong(1)
	}
	
	// Final result: sum / count as a Double.
	// Fix: return null (SQL avg semantics) instead of NaN when no non-null
	// rows were aggregated.
	override def evaluate(buffer: Row): Any = {
		if (buffer.getLong(1) == 0L) null
		else buffer.getLong(0).toDouble / buffer.getLong(1)
	}
}

/**
  * Registers the untyped UDAF and calls it from a SQL query.
  */
object UDAF {
	
	import org.apache.spark.SparkConf
	import org.apache.spark.sql.SparkSession
	import org.apache.spark.sql.DataFrame
	
	def main(args: Array[String]): Unit = {
		val sparkConf = new SparkConf().setAppName("udf").setMaster("local[*]")
		val session: SparkSession = SparkSession.builder().config(sparkConf).getOrCreate()
		
		// Step 1: instantiate the aggregate function and register it.
		val avgFunction = new MyAvgFunction()
		session.udf.register("avgAge", avgFunction)
		
		val frame: DataFrame = session.read.json("testData/input/word/test.json")
		frame.createTempView("student")
		
		// Step 2: invoke the UDAF from SQL.
		session.sql("select avgAge(age) age from student").show()
		
		session.stop()
	}
}


/**
  * Strongly-typed user-defined aggregate function:
  * 1) extend Aggregator[-IN, BUF, OUT]
  * 2) implement its methods
  */
// Input record type: one user row read from the JSON source.
case class UserBean(name: String, age: Long)

// Aggregation buffer: running sum of ages and count of rows folded in.
case class AvgBuffer(var sum: Long, var count: Int)

/**
  * Strongly-typed aggregator computing the average age of [[UserBean]] records.
  *
  * IN = UserBean, BUF = [[AvgBuffer]] (running sum/count), OUT = Double.
  */
class MyAvgClassFunction extends Aggregator[UserBean, AvgBuffer, Double] {
	
	// Zero value: an empty buffer, neutral with respect to merge.
	override def zero: AvgBuffer = {
		AvgBuffer(0, 0)
	}
	
	// Fold one input record into the buffer.
	override def reduce(b: AvgBuffer, a: UserBean): AvgBuffer = {
		b.sum = b.sum + a.age
		b.count = b.count + 1
		b
	}
	
	// Merge two partial buffers (e.g. from different partitions).
	// Fix: the original computed b1.count = b2.count + b2.count, which
	// double-counts b2 and drops b1's count entirely — corrupting the average
	// whenever the data is aggregated across more than one partition.
	override def merge(b1: AvgBuffer, b2: AvgBuffer): AvgBuffer = {
		b1.sum = b1.sum + b2.sum
		b1.count = b1.count + b2.count
		b1
	}
	
	// Final result: sum / count as a Double.
	override def finish(reduction: AvgBuffer): Double = {
		reduction.sum.toDouble / reduction.count
	}
	
	// Encoder for the intermediate buffer (case class => product encoder).
	override def bufferEncoder: Encoder[AvgBuffer] = Encoders.product
	
	// Encoder for the primitive Double output.
	override def outputEncoder: Encoder[Double] = Encoders.scalaDouble
}

/**
  * Applies the strongly-typed aggregator. A typed Aggregator can only be used
  * through the Dataset (DSL) API, not from a SQL string.
  */
object UDAF2 {
	
	import org.apache.spark.SparkConf
	import org.apache.spark.sql.SparkSession
	import org.apache.spark.sql.DataFrame
	
	def main(args: Array[String]): Unit = {
		val conf: SparkConf = new SparkConf().setAppName("udf").setMaster("local[*]")
		val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()
		import spark.implicits._
		
		// Step 1: instantiate the typed aggregator.
		val udaf = new MyAvgClassFunction()
		
		// Convert the aggregator into a typed column usable with Dataset.select.
		val avgCol: TypedColumn[UserBean, Double] = udaf.toColumn.name("avgAge")
		
		val df: DataFrame = spark.read.json("testData/input/word/test.json")
		
		val userDS: Dataset[UserBean] = df.as[UserBean]
		
		// Step 2: apply the aggregator.
		// Fix: the original called userDS.select("avgCol"), which resolves a
		// column literally named "avgCol" (none exists -> AnalysisException);
		// the TypedColumn value itself must be passed.
		userDS.select(avgCol).show()
		
		spark.stop()
	}
}
