package com.spark.sql

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo12SheBao {

  /**
   * Gaps-and-islands demo: from monthly social-insurance payment records
   * (person id, company, month), derive each person's continuous employment
   * periods — i.e. the start month and end month at each company stint.
   *
   * Technique: flag the months where the company changes, then take a running
   * sum of the flag so every continuous stint gets a distinct group id.
   */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("Demo12SheBao")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()
    import org.apache.spark.sql.functions._
    import spark.implicits._

    try {
      // Read the records: one row per person per month.
      val sheBaoDF: DataFrame = spark
        .read
        .format("csv")
        .option("sep", ",")
        .schema("id STRING, burk STRING , sdate STRING")
        .load("data/shebao.txt")

      /**
       * Cluster the rows along the timeline.
       */
      sheBaoDF
        // Company this person was at in the previous month (null for the first row of each person).
        .withColumn("last_burk", lag($"burk", 1) over Window.partitionBy($"id").orderBy($"sdate"))
        // Mark months where the company changed; the first month is also a change (null comparison → otherwise branch).
        .withColumn("flag", when($"burk" === $"last_burk", 0).otherwise(1))
        // Running sum of the flag assigns a distinct group id to each continuous stint.
        .withColumn("clazz", sum($"flag") over Window.partitionBy($"id").orderBy($"sdate"))
        // Earliest and latest month within each stint = hire month and leave month.
        .groupBy($"id", $"burk", $"clazz")
        .agg(min($"sdate") as "start_date", max($"sdate") as "end_date")
        .show(1000)
    } finally {
      // Always release the local Spark context, even if the job fails.
      spark.stop()
    }
  }

}
