package com.shujia.sql

import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo12UDF {

  /**
   * Demonstrates a Spark SQL user-defined function (UDF):
   * defines a Scala function that prefixes a name, wraps it with `udf`,
   * registers it under the SQL name "syh", and invokes it from a SQL query.
   */
  def main(args: Array[String]): Unit = {
    val sparkSession: SparkSession = SparkSession.builder()
      .master("local")
      .appName("行转列 列转行案例演示")
      .getOrCreate()

    import org.apache.spark.sql.functions._
    import sparkSession.implicits._

    // When HDFS/Hadoop config files are on the classpath, relative/absolute paths
    // resolve against HDFS by default; otherwise they resolve against the local FS.
    val df1: DataFrame = sparkSession.read
      .format("csv")
      .schema("id STRING,name STRING,age INT,gender STRING,clazz STRING")
      .option("sep", ",")
      .load("/bigdata30/students.csv")

    //    df1.select($"id",$"name",concat(expr("'数加: '"),$"name")).show()

    // FIX: the original `udf("数加: " + _)` cannot compile — the bare placeholder
    // lambda has no inferable parameter type, so the `udf[RT, A1]` overload cannot
    // be resolved. Annotating the parameter as String fixes inference while keeping
    // the exact same concatenation behavior.
    val yz: UserDefinedFunction = udf((name: String) => "数加: " + name)
    //    df1.select($"id", $"name", yz($"name")).show()

    df1.createOrReplaceTempView("students")
    // Register the UDF variable under the name "syh" so SQL text can call it.
    sparkSession.udf.register("syh", yz)
    sparkSession.sql(
      """
        |select
        |id,
        |name,
        |syh(name) as new_name
        |from
        |students
        |""".stripMargin).show()

    // FIX: release the local Spark context/resources when the demo finishes;
    // the original never stopped the session.
    sparkSession.stop()
  }
}
