package com.example.bigdata.spark.UDF

import java.util

import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.types._

object sparkDemo {
    def main(args: Array[String]): Unit = {
        val spark = SparkSession
                .builder()
                .master("local[1]")
                .appName("UDF Demo")
                .getOrCreate()
        UDF(spark)
        spark.stop()
    }

    /**
      * Demonstrates registering a Scala closure as a SQL-callable UDF,
      * alongside the built-in `from_unixtime` function, which formats a
      * unix timestamp (in seconds) as a date-time string.
      *
      * @param spark the active SparkSession used to build the DataFrame and run SQL
      */
    def UDF(spark: SparkSession): Unit = {
        // Explicit schema: create_time holds a unix timestamp in seconds (LongType),
        // which is what from_unixtime expects.
        val schema = StructType(List(
            StructField("name", StringType, nullable = false),
            StructField("age", IntegerType, nullable = false),
            StructField("create_time", LongType, nullable = false)
        ))
        // Capture the timestamp once so all rows share the same value
        // (calling currentTimeMillis per row could straddle a second boundary).
        val nowSeconds = System.currentTimeMillis() / 1000
        // Immutable Scala Seq instead of a mutable java.util.ArrayList;
        // parallelize to an RDD so the (RDD[Row], schema) overload applies.
        val rows = Seq(
            Row("Alice", 20, nowSeconds),
            Row("Tom", 18, nowSeconds),
            Row("Boris", 30, nowSeconds)
        )
        val df1 = spark.createDataFrame(spark.sparkContext.parallelize(rows), schema)
        df1.show()
        // createOrReplaceTempView is idempotent: re-running this method will not
        // fail with "view already exists" the way createTempView would.
        df1.createOrReplaceTempView("t_user")
        spark.sql("select name,age,from_unixtime(create_time,'yyyy-MM-dd HH:mm:ss') from t_user").show()

        // Register a Scala closure under a SQL-visible name, then call it from SQL.
        spark.udf.register("toUpperCaseUDF", (column: String) => column.toUpperCase())
        spark.sql("select toUpperCaseUDF(name),age,from_unixtime(create_time,'yyyy-MM-dd HH:mm:ss') from t_user").show()
    }

}
