package com.xiaohu.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

/**
 * Social-insurance ("shebao") payment-history analysis.
 *
 * For each person (`id`), collapses consecutive months paid through the same
 * company (`burk`) into one continuous employment interval, emitting
 * (id, burk, start_date, end_date). Implemented as a classic
 * gaps-and-islands query:
 *   1. `lag(burk)` fetches the previous month's company per person;
 *   2. a change flag (1 when the company differs from the previous row)
 *      marks the start of a new island;
 *   3. a running `sum(flag)` assigns a group number to each island;
 *   4. grouping by (id, burk, group) yields min/max date per interval.
 */
object Demo14SheBao {
  def main(args: Array[String]): Unit = {
    // Paths may be overridden on the command line (args(0) = input,
    // args(1) = output); defaults preserve the original behaviour.
    val inputPath: String  = if (args.length > 0) args(0) else "/bigdata30/shebao.txt"
    val outputPath: String = if (args.length > 1) args(1) else "/bigdata30/spark_out4"

    val sparkSession: SparkSession = SparkSession.builder()
      .master("local")
      .appName("社保练习")
      .getOrCreate()

    import org.apache.spark.sql.functions._
    import sparkSession.implicits._

    try {
      // Source data: one row per (person, company, month), no header line.
      val df1: DataFrame = sparkSession.read
        .format("csv")
        .schema("id STRING,burk STRING,sdate STRING")
        .load(inputPath)

      val resDF: DataFrame = df1
        // Previous month's company for the same person, in date order.
        .withColumn("before_burk", lag($"burk", 1) over Window.partitionBy($"id").orderBy($"sdate"))
        .select(
          $"id",
          $"burk",
          $"sdate",
          // First row per person has no predecessor; treat it as "unchanged"
          // so it does not spuriously start a new interval via the flag below.
          when($"before_burk".isNull, $"burk").otherwise($"before_burk") as "before_burk"
        )
        // flag = 1 exactly when the company changed relative to the previous month.
        .withColumn("flag", when($"burk" === $"before_burk", 0).otherwise(1))
        // Running sum of change flags numbers each continuous interval ("island").
        .withColumn("tmp", sum($"flag") over Window.partitionBy($"id").orderBy($"sdate"))
        .groupBy($"id", $"burk", $"tmp")
        .agg(
          min($"sdate") as "start_date",
          max($"sdate") as "end_date"
        )
        .select($"id", $"burk", $"start_date", $"end_date")

      resDF.write
        .format("csv")
        .mode(SaveMode.Overwrite)
        .save(outputPath)
    } finally {
      // Release the Spark context even if the job fails — the original
      // never stopped the session.
      sparkSession.stop()
    }
  }
}
