package homework

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo7Work {
  /**
   * Ant Forest exercise: find users who collected more than 100g of
   * low-carbon energy per day on at least 3 consecutive days in 2017,
   * then print the matching raw collection records.
   *
   * Consecutive-day detection uses the "date minus row_number" trick:
   * within one user, subtracting the per-user row number (ordered by
   * date) from the date yields the same anchor date for every day of a
   * consecutive run, so runs can be grouped and counted.
   */
  def main(args: Array[String]): Unit = {

    /**
     * Create the SparkSession (local mode); one shuffle partition is
     * enough for this small exercise dataset.
     */
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("plants")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    /**
     * Read the tab-separated source data: one collection event per row.
     */
    import org.apache.spark.sql.functions._
    import spark.implicits._

    val user_low_carbon_df: DataFrame = spark
      .read
      .format("csv")
      .option("sep", "\t")
      .schema("user_id STRING,date_dt STRING,low_carbon INT")
      .load("data/ant_user_low_carbon.txt")

    user_low_carbon_df
      // Keep 2017 records only. Column.substr is 1-based in Spark SQL;
      // the original used 0, which Spark silently coerces to 1.
      .where($"date_dt".substr(1, 4) === "2017")
      // Total energy collected per user per day; keep days over 100g.
      .groupBy($"user_id", $"date_dt")
      .agg(sum($"low_carbon") as "sum_low_carbon")
      .where($"sum_low_carbon" > 100)
      // Normalize "2017/1/1"-style strings to a real DATE so that both
      // ordering and date arithmetic are calendar-correct. Ordering the
      // raw string (as the original did) is a bug: lexicographically
      // "2017/1/10" < "2017/1/2", which corrupts row_number and breaks
      // the consecutive-run anchor across the month.
      .withColumn("ahead_date_dt", to_date(regexp_replace($"date_dt", "/", "-")))
      .withColumn("rn", row_number() over Window.partitionBy($"user_id").orderBy($"ahead_date_dt"))
      // Every day of one consecutive run shares the same anchor value.
      .withColumn("date_dt-rn", expr("date_sub(ahead_date_dt,rn)"))
      // Size of each (user, run) group = length of the consecutive run.
      .withColumn("counts", count($"date_dt-rn") over Window.partitionBy($"user_id", $"date_dt-rn"))
      .where($"counts" >= 3)
      // Join back to the raw events to show the individual records for
      // the qualifying user/day pairs.
      .join(user_low_carbon_df, List("user_id", "date_dt"))
      .select("user_id", "date_dt", "low_carbon")
      .show(100)
  }
}
