package homework

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Reconstructs a person's work history (per-company employment stints)
 * from social-security payment records.
 */
object Demo8SheBao {

  /**
   * Rebuilds each person's employment history (start/end date per company stint)
   * from raw social-security payment rows, using the gaps-and-islands technique:
   * a gap of more than 32 days between consecutive payments starts a new stint.
   *
   * Input file `data/shebao.txt`: CSV rows of (pid, ComName, InTime).
   */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("Demo8SheBao")
      // tiny local demo — one shuffle partition avoids the default 200
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    import org.apache.spark.sql.functions._
    import spark.implicits._

    // One row per social-security payment: person id, company name, payment date.
    // NOTE(review): InTime is read as STRING; datediff below relies on Spark's
    // implicit cast, so the file is presumed to hold yyyy-MM-dd dates — confirm.
    val sheBaoDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("pid STRING,ComName STRING,InTime STRING")
      .load("data/shebao.txt")

    sheBaoDF
      // Previous payment date for the same person at the same company.
      // Fixed: the orderBy column was misspelled "InTIme" (worked only because
      // Spark resolves columns case-insensitively by default).
      .withColumn("rn", lag($"InTime", 1) over Window.partitionBy($"pid", $"ComName").orderBy($"InTime"))
      // Days elapsed since the previous payment (null on the first row of a group).
      .withColumn("InTime-rn", datediff($"InTime", $"rn"))
      // 1 marks the start of a new stint: first record ever, or a gap > 32 days.
      .withColumn("InTime-rn", when($"InTime-rn".isNull or $"InTime-rn" > 32, 1).otherwise(0))
      // Running sum per person turns the start markers into a distinct island id
      // for every continuous stint.
      .withColumn("InTime-rn", sum($"InTime-rn") over Window.partitionBy($"pid").orderBy($"InTime"))
      // Collapse each stint to its first and last payment date.
      .groupBy($"pid", $"InTime-rn", $"ComName")
      .agg(min($"InTime") as "start-date", max($"InTime") as "end-date")
      .select("pid", "ComName", "start-date", "end-date")
      .show()

    // Release local Spark resources (was missing in the original).
    spark.stop()
  }
}
