package com.hw

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo9Work {

  /**
   * Gaps-and-islands job: collapses consecutive rows with the same company
   * (`burk`) per employee (`id`) into one row per employment stint with its
   * start and end date, then prints the result.
   *
   * Input: `data/works.txt`, CSV with columns id, burk, tTime.
   */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .config("spark.sql.shuffle.partitions", 1)
      .master("local")
      .appName("works")
      .getOrCreate()

    try {
      val work: DataFrame = spark
        .read
        .format("csv")
        .option("sep", ",")
        .schema("id string,burk string ,tTime string")
        .load("data/works.txt")

      import org.apache.spark.sql.functions._
      import spark.implicits._

      work
        // FIX: "yy-MM-dd" truncated the year to two digits, losing information
        // and breaking lexicographic date ordering across century boundaries;
        // the window orderBy and min/max below rely on correct string ordering.
        .withColumn("tDate", date_format($"tTime", "yyyy-MM-dd"))
        // Previous company for each row (null on the first row of each id).
        .withColumn("last_burk", lag($"burk", 1) over Window.partitionBy($"id").orderBy($"tDate"))
        // Mark a company change with 1; the null comparison on the first row
        // of each id also falls through to otherwise(1), starting a new stint.
        .withColumn("flag", when($"burk" === $"last_burk", 0).otherwise(1))
        // Running sum of the change markers assigns an island id per stint.
        .withColumn("res_flag", sum($"flag") over Window.partitionBy($"id").orderBy($"tDate"))
        // Earliest date within the island is the start of the stint.
        .withColumn("start_time", min($"tDate") over Window.partitionBy($"id", $"res_flag"))
        // Latest date within the island is the end of the stint.
        .withColumn("end_time", max($"tDate") over Window.partitionBy($"id", $"res_flag"))
        // Keep exactly one row per stint.
        .select($"id", $"start_time", $"end_time", $"burk")
        .distinct()
        .show()
    } finally {
      // Release the local Spark context even if the job fails.
      spark.stop()
    }
  }

}
