import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions.{col, expr, isnull, lag, lead}
import org.apache.spark.sql.{Column, DataFrame, Row, SparkSession}

import java.time.LocalDate
import scala.reflect.internal.Flags.CASE
import scala.xml.dtd.ContentModelParser.END

/** One input record: an ID plus a start/end date pair (dates kept as raw strings). */
case class Person(ID: Int, startdate: String, enddate: String)

object Demo1 {
  /**
   * Demo: read (ID, startdate, enddate) rows from a space-delimited CSV,
   * flatten both date columns into one sorted column, and pair each date
   * with the next one in sort order (the last date pairs with itself).
   */
  def main(args: Array[String]): Unit = {
    // Local Spark session for this demo; local[*] uses all available cores.
    val spark = SparkSession
      .builder()
      .appName("Demo1")
      .master("local[*]")
      .getOrCreate()
    val sc = spark.sparkContext
    sc.setLogLevel("warn")

    import spark.implicits._

    // Explicit DDL schema string (supported since Spark 2.3.0), so no
    // header row or inference pass is needed for data/date.csv.
    val schemaStr = "ID int, startdate string, enddate string"
    val df4: DataFrame = spark.read
      .option("delimiter", " ")
      .schema(schemaStr)
      .csv("data/date.csv")
    df4.printSchema()
    df4.show()
    println("___________________")

    // Flatten startdate/enddate into a single sorted date column.
    // The constant `1` key places every row in one window partition.
    val frame = df4.select(col("startdate"), col("enddate"))
    val newRdd = frame.rdd
      .map(row => row.getString(0) + " " + row.getString(1))
      .flatMap(_.split("\\s+"))
      .sortBy(identity)
      .map(d => (1, d))
    val df = spark.createDataset(newRdd).toDF("sequence", "dateStr")

    val w2 = Window.partitionBy("sequence").orderBy("dateStr")
    df.createTempView("books")

    // Pair each date with the next one in sort order; lead() yields null
    // for the final row of the partition.
    val frame1 = df.select($"dateStr", lead("dateStr", 1).over(w2).alias("nextDate"))

    // The built-in coalesce replaces the previous hand-rolled null-check UDF,
    // which shadowed the `isnull` imported from sql.functions and used
    // `return` inside its body (a Scala anti-pattern). coalesce takes
    // nextDate when present, otherwise falls back to dateStr — and, unlike
    // a UDF, stays visible to the Catalyst optimizer.
    import org.apache.spark.sql.functions.coalesce
    frame1
      .select($"dateStr", coalesce($"nextDate", $"dateStr").alias("nextDate"))
      .show()

    spark.close()
  }

}