package homework
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SparkSession}
object Demo6Work {

  /**
   * Ant Forest homework: per user, total low-carbon points and how many more
   * "p002" plants the user can redeem than the next-ranked user (top 10).
   */
  def main(args: Array[String]): Unit = {

    /**
     * Create the Spark session. A single shuffle partition is deliberate:
     * the dataset is tiny and runs locally.
     */
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("plants")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    import org.apache.spark.sql.functions._
    import spark.implicits._

    /**
     * Load tab-separated records: one row per (user_id, date_dt, low_carbon).
     */
    val user_low_carbon_df: DataFrame = spark
      .read
      .format("csv")
      .option("sep", "\t")
      .schema("user_id STRING,date_dt STRING,low_carbon INT")
      .load("data/ant_user_low_carbon.txt")

    // Per user: total carbon points, then the number of redeemable p002 plants
    // (a 215-point plant must be claimed first; each extra p002 costs 19 points).
    user_low_carbon_df
      .groupBy($"user_id")
      .agg(sum($"low_carbon") as "sum_carbon")
      // FIX: the original used .otherwise($"sum_carbon"), which ranked raw
      // carbon points against plant counts (a user with 200 unredeemable
      // points would outrank one with 5 plants). A user who cannot afford
      // the 215-point plant redeems zero p002 plants.
      .withColumn("p002_count",
        when($"sum_carbon" > 215, floor(($"sum_carbon" - 215) / 19)).otherwise(lit(0)))
      // lead(1) over the descending ranking = the next-ranked user's count.
      // (The original wrote lag(col, -1), which is equivalent but misleading.)
      // NOTE: a Window without partitionBy pulls every row into one partition;
      // acceptable here because the input is already one row per user.
      .withColumn("next_p002_count",
        lead($"p002_count", 1) over Window.orderBy($"p002_count".desc))
      .withColumn("more_p002_count", $"p002_count" - $"next_p002_count")
      .select($"user_id", $"sum_carbon", $"more_p002_count")
      .limit(10)
      .show(100)

    // Release local Spark resources (the original never stopped the session).
    spark.stop()
  }
}
