package com.shujia.sql

import org.apache.spark.broadcast.Broadcast
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.functions.udf

object Test {

  /**
   * Demo entry point: builds a local SparkSession, defines a string-prefixing
   * UDF, and shows how to use it from Spark SQL (and, commented, from the DSL).
   *
   * Reads `spark/data/students.csv` with an explicit schema, registers it as
   * the temp view `lines`, and runs `select str_split(clazz) from lines`.
   */
  def main(args: Array[String]): Unit = {
    // 1. Create the Spark SQL environment (local mode, 2 threads).
    val spark: SparkSession = SparkSession
      .builder()
      .master("local[2]")
      .appName("sql")
      // Default is 200 shuffle partitions on a cluster; 1 is plenty locally.
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Define a UDF that prefixes every input string with "数加:".
    // (The literal prefix is intentional runtime output — do not translate.)
    val str_split: UserDefinedFunction = udf((line: String) => {
      "数加:" + line
    })

    try {
      // Load the students CSV with an explicit schema (no header inference).
      val studentsDF: DataFrame = spark.read
        .format("csv")
        .option("sep", ",")
        .schema("id STRING,name STRING,age INT,gender STRING,clazz STRING")
        .load("spark/data/students.csv")

      // DSL usage example: studentsDF.select(str_split($"clazz")).show()

      // Expose the DataFrame to SQL as the temp view `lines`.
      studentsDF.createOrReplaceTempView("lines")

      // 2. Use the UDF from SQL: register the *same* UserDefinedFunction
      // instead of duplicating the lambda at the registration site.
      spark.udf.register("str_split", str_split)

      spark.sql(
        """
          |select str_split(clazz) from lines
          |""".stripMargin).show()
    } finally {
      // Always release the SparkSession's resources, even on failure.
      spark.stop()
    }
  }
}
