import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.functions._

/**
 * 6. Table A has three columns: ID, startdate, enddate, and contains 3 rows:
 * 1 2019-03-04 2020-02-03
 * 2 2020-04-05 2020-08-04
 * 3 2019-10-09 2020-06-11
 * Write a query (both in SQL and in the DataFrame DSL) that transforms the data above into:
 * 2019-03-04  2019-10-09
 * 2019-10-09  2020-02-03
 * 2020-02-03  2020-04-05
 * 2020-04-05  2020-06-11
 * 2020-06-11  2020-08-04
 * 2020-08-04  2020-08-04
 */

object Subject6 {

  /**
   * Entry point: builds a local SparkSession, flattens the start/end dates of
   * table A into one sorted column, and pairs each date with the next date
   * (the last date pairs with itself) — once via Spark SQL's `lead` window
   * function and once via the DataFrame DSL.
   */
  def main(args: Array[String]): Unit = {

    Logger.getLogger("org").setLevel(Level.WARN)
    val spark = SparkSession
      .builder()
      .appName(this.getClass.getCanonicalName)
      .master("local[*]")
      .getOrCreate()
    import spark.implicits._
    import spark._

    // Ensure the local Spark context is always released, even on failure.
    try {
      val data = List(
        ("1", "2019-03-04", "2020-02-03"),
        ("2", "2020-04-05", "2020-08-04"),
        ("3", "2019-10-09", "2020-06-11"))

      val dateDf: DataFrame = data.toDF("ID", "startDate", "endDate")

      // Flatten startDate and endDate into a single column, then sort it so
      // that consecutive rows are consecutive dates.
      val df: DataFrame = dateDf
        .flatMap { row => List(row.getString(1), row.getString(2)) }
        .toDF("startDate")
        .sort('startDate)

      df.show()

      // SQL approach
      df.createTempView("t1")

      // Ordered by startDate, endDate is the next row's startDate;
      // when there is no next row, `lead` falls back to the row's own startDate.
      println("**************************SQL********************************")
      sql(
        """
          |select startDate,
          |       lead(startDate, 1, startDate) over(order by startDate) as endDate
          |  from t1
          |""".stripMargin
      ).show

      // DSL approach
      // Window covering the current row and the next row, ordered by startDate.
      println("**************************DSL********************************")
      val w = Window.orderBy('startDate).rowsBetween(0, 1)
      // max over (current, next): since the column is sorted, the max is the
      // next row's startDate; on the last row the window holds only the current
      // row, so the max degenerates to its own startDate.
      df.select('startDate, max('startDate).over(w).as("endDate")).show
    } finally {
      // Missing in the original: stop the session to release local resources.
      spark.stop()
    }
  }
}
