package com.shujia.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Reconstructs each employee's job history from monthly social-security
 * payment records using the "gaps and islands" window-function pattern.
 *
 * Input (spark/data/shebao.txt, CSV): id, burk (company), sdate (month).
 * Output: one row per continuous employment stint with its start/end month.
 */
object Demo13SheBao {
  def main(args: Array[String]): Unit = {
    val sparkSession: SparkSession = SparkSession.builder()
      .master("local")
      .appName("作业社保演示")
      // Tiny demo dataset — one shuffle partition avoids needless task overhead.
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()
    import sparkSession.implicits._
    import org.apache.spark.sql.functions._

    // Read the raw payment records.
    val sheBaoDF: DataFrame = sparkSession.read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING,burk STRING,sdate STRING")
      .load("spark/data/shebao.txt")

    // Compute each employee's employment history.
    sheBaoDF
      // For each employee (ordered by month), fetch the previous month's company.
      // lag yields null on the first row of each partition.
      .withColumn("last_burk", lag($"burk", 1) over Window.partitionBy($"id").orderBy($"sdate"))
      // Flag a job change: 1 when the company differs from last month (or is the
      // first record — null comparison falls through to otherwise), else 0.
      .withColumn("flag", when($"burk" === $"last_burk", 0).otherwise(1))
      // Running sum of the flag per employee gives a stable group id ("island")
      // for each continuous stint at one company.
      .withColumn("tmp", sum($"flag") over Window.partitionBy($"id").orderBy($"sdate"))
      // Collapse each stint to its first and last month.
      .groupBy($"id", $"burk", $"tmp")
      .agg(min($"sdate") as "start_date", max($"sdate") as "end_date")
      .show(100)

    // Release the Spark resources held by this local session.
    sparkSession.stop()
  }
}
