package com.peng.sparktest.sparksql

import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

/**
 * Spark SQL feature tour: built-in SQL functions, UDF/UDAF registration,
 * CASE-WHEN, window functions, and query-plan inspection via `explain`.
 *
 * Most examples are intentionally left commented out so they can be enabled
 * one at a time while studying; only the final `explain` demo runs.
 */
object SparkSql06_Functions {
  def main(args: Array[String]): Unit = {
    val session: SparkSession = SparkSession.builder()
      .appName("functions_test")
      .master("local")
      .getOrCreate()

    session.sparkContext.setLogLevel("ERROR")

    import session.implicits._
    // Sample data: (name, class, score), registered as temp view "datas".
    val dataFrame: DataFrame = List(("A", 1, 90),
      ("B", 1, 90),
      ("A", 2, 50),
      ("C", 1, 80),
      ("B", 2, 60)).toDF("name", "class", "score")
    dataFrame.createTempView("datas")

    import session.sql
    println("=====================1、sql函数的基本使用===============================")
    //    sql("select * from datas order by name desc,score asc").show()
    //
    //    sql("select name,sum(score) from datas group by name order by name desc").show()

    println("=====================2、自定义sql函数的基本使用===============================")
    // UDF
    //    session.udf.register("myfunc", (x: Int) => x * 10) // applies per-row only; cannot aggregate
    //    sql("select name,myfunc(score) as name10 from datas").show()
    //
    //    // Next, a custom aggregate function (UDAF).
    //    // Does an aggregate require GROUP BY? No — e.g. sum() works without grouping too.
    //    // NOTE: despite the name "mySum", MyAgg computes the average score.
    //    session.udf.register("mySum", new MyAgg)
    //    sql("select name,mySum(score) from datas group by name").show()

    println("=====================3、case-when===============================")

    //    sql("select * ," +
    //      "case  when score>80 then 'god'" +
    //      " when score>60 then 'nice'  " +
    //      " else 'dog'  " +
    //      " end  as  power " +
    //      " from datas").show()
    //
    //    // Count rows grouped by the derived rating.
    //    sql("select " +
    //      "case  when score>80 then 'god'" +
    //      " when score>60 then 'nice'  " +
    //      " else 'dog'  " +
    //      " end  as  power,count(*) " +
    //      " from datas " +
    //      " group by  " +
    //      " case  when score>80 then 'god' " +
    //      " when score>60 then 'nice'  else 'dog' " +
    //      "end ").show()
    //
    //    // Row-to-rows (explode):  A 1 60  =>  rows "A VIP" and "A 60"
    //    sql("select  name," +
    //      "explode" +
    //      "(" +
    //      "split(" +
    //      "concat(case class when 1 then 'VIP' else 'NoVIP' " +
    //      " end,' ',score),' ')" +
    //      ") from datas").show()

    println("=====================4、开窗函数===============================")

    //    GROUP BY is a pure aggregation: one output row per group.
    // A window function pairs func with over(); over() can partition and order
    // the data, running func within each partition — but unlike GROUP BY the
    // original row count is preserved: every row carries its partition's result.
    //    sql("select *," +
    //      "rank() over(partition by class order by score desc ) as rank," +
    //      "row_number() over(partition by class order by score desc ) as row_num" +
    //      " from datas").show()


    // Compare GROUP BY against the equivalent window function:

    //    //group by
    //    sql("select class,count(*) from datas group by class").show()
    //
    //    // window function
    //    sql("select *,count(*) over(partition by class) as count from datas").show()


    println("=========================5、explain===================================")

    // From the optimized plan of this statement you can observe:
    // 1. predicate pushdown — the WHERE condition is applied during the scan;
    // 2. column pruning — score is never used from tb's projection, so it is
    //    not read from the tb subquery at all.
    //    val res: DataFrame = sql("select ta.name,ta.class " +
    //      " from (select name,class from datas) as ta " +
    //       " join (select name,score from datas) as tb " +
    //      " on ta.name=tb.name " +
    //      " where score>40")
    //    res.show()
    //    res.explain(true)

    // Cartesian products are disallowed by default but can be enabled via config.
    //    val res: DataFrame = sql("select ta.name,ta.class " +
    //      " from (select name,class from datas) as ta " +
    //      " ,(select name,score from datas) as tb " +
    //      " where score>40")
    //    res.show()
    //    res.explain(true)

    // The result only needs name and score (both available from tb), so ta acts
    // as the small side and gets broadcast — the "small table joins big table" rule.
    // Spark calls this a broadcast variable; MapReduce calls it a cache file.
    //    val res: DataFrame = sql("select ta.name,tb.score " +
    //      " from (select name,class from datas) as ta " +
    //      " join (select name,score from datas) as tb " +
    //      " on ta.name=tb.name " +
    //      " where score>40")
    //    res.show()
    //    res.explain(true)

    try {
      // In the optimized plan the projection becomes score + 120, showing
      // constant folding of the stacked literals (+20 +100).
      val res: DataFrame = sql("select ta.name,tb.score+20+100 " +
        " from (select name,class from datas) as ta " +
        " join (select name,score from datas) as tb " +
        " on ta.name=tb.name " +
        " where score>40")
      res.show()
      res.explain(true)
    } finally {
      // Release the local Spark context and its resources.
      session.stop()
    }
  }

  /**
   * Custom aggregate function: computes the AVERAGE of the input scores
   * (registered under the misleading name "mySum" above).
   *
   * NOTE(review): UserDefinedAggregateFunction is deprecated since Spark 3.0
   * in favor of org.apache.spark.sql.expressions.Aggregator; kept here so the
   * registration code above continues to work unchanged.
   */
  class MyAgg extends UserDefinedAggregateFunction {
    // Schema of the input column(s) passed to the function.
    override def inputSchema: StructType = {
      StructType.apply(Array(StructField.apply("score", IntegerType, nullable = true)))
    }

    // Schema of the intermediate aggregation buffer (a per-group scratch row).
    // Slot 0 accumulates the running SUM of scores; slot 1 the row COUNT —
    // names now match how update()/evaluate() actually use the slots.
    override def bufferSchema: StructType = {
      StructType.apply(
        Array(
          StructField.apply("sum", IntegerType, nullable = true),
          StructField.apply("count", IntegerType, nullable = true)
        ))
    }

    // Result type of the aggregate.
    override def dataType: DataType = DoubleType

    // Same input always produces the same output (not "idempotent" — this
    // flag lets Catalyst cache/re-run the expression safely).
    override def deterministic: Boolean = true

    // Initialize the buffer before any rows arrive.
    override def initialize(buffer: MutableAggregationBuffer): Unit = {
      buffer(0) = 0 // running sum of scores
      buffer(1) = 0 // row count
    }

    // Called once per input row within a group.
    override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
      buffer(0) = buffer.getInt(0) + input.getInt(0) // sum += score
      buffer(1) = buffer.getInt(1) + 1               // count += 1
    }

    // Merge two partial buffers (e.g. from different partitions).
    override def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
      buffer1(0) = buffer1.getInt(0) + buffer2.getInt(0)
      buffer1(1) = buffer1.getInt(1) + buffer2.getInt(1)
    }

    // Produce the final result: sum / count as a true Double.
    // BUG FIX: the original divided Int by Int, truncating the average
    // (e.g. 200/3 => 47 instead of 66.67 for 80+60+60... etc.) despite the
    // declared DoubleType. Also guard against an empty buffer (count == 0),
    // which would otherwise divide by zero.
    override def evaluate(buffer: Row): Double = {
      val count = buffer.getInt(1)
      if (count == 0) 0.0 else buffer.getInt(0).toDouble / count
    }
  }

}
