package com.shujia.sql

import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

object Demo9UDF {

  /**
   * Demonstrates defining a UDF and using it from both the DataFrame DSL
   * and Spark SQL (via `lateral view explode`).
   */
  def main(args: Array[String]): Unit = {

    // 1. Create the Spark SQL session (entry point for the DataFrame/SQL APIs).
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("sql")
      .config("spark.sql.shuffle.partitions", 1) // default is 200 on a cluster; 1 is enough locally
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Read each raw line into a single "line" column; "|" as separator keeps
    // comma-separated content from being split at read time.
    val linesDF: DataFrame = spark.read
      .schema("line STRING")
      .option("sep", "|")
      .csv("spark/data/words.txt")

    // Define a UDF that splits a line by an arbitrary separator.
    // Null-safe: a null input line yields an empty array instead of an NPE
    // (CSV reads can produce null for empty rows).
    val str_split: UserDefinedFunction = udf((line: String, sep: String) => {
      if (line == null) Array.empty[String] else line.split(sep)
    })

    // DSL usage: pass the separator as a literal column with lit(",") rather
    // than expr("','"), which needlessly round-trips through the SQL parser.
    linesDF.select(explode(str_split($"line", lit(",")))).show()

    /**
     * Using the UDF from SQL: register it under a SQL-visible name, then
     * call it inside a lateral view to explode each line into words.
     */
    linesDF.createOrReplaceTempView("lines")

    // Register the same UDF for SQL statements.
    spark.udf.register("str_split", str_split)

    spark.sql(
      """
        |select * from
        |lines
        |lateral view explode(str_split(line,',')) T as word
        |
        |""".stripMargin).show()

    // Release the local session's resources when the demo finishes.
    spark.stop()
  }
}
