package sql

import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
import util.SparkUtil

/**
 * User activity composition analysis.
 * Source table: user continuous-activity range records.
 */
object DEMO_ADS_APL_UCA {

  /**
   * Entry point.
   *
   * Reads the user continuous-activity range records, keeps the ranges that
   * are still open (rng_end = '9999-12-31', i.e. users active on the
   * analysis date), expands each user into one row per consecutive active
   * day, and finally counts how many users reached each active-day count.
   *
   * @param args optional first argument: the analysis date (yyyy-MM-dd);
   *             defaults to "2020-06-08" to preserve the original behavior.
   */
  def main(args: Array[String]): Unit = {
    // Analysis date was hard-coded in three places; parameterize it while
    // keeping the original value as the default.
    val calcDate: String = args.headOption.getOrElse("2020-06-08")

    val spark: SparkSession = SparkUtil.getSparkSession(this.getClass.getSimpleName)
    import spark.implicits._

    // Load the user continuous-activity range records (mock data lives in a
    // CSV file here; in production this would come from a Hive table).
    val rngDF: DataFrame = spark.read.option("header", value = true).csv("data/active_range/rng.dat")

    // Keep only the still-open ranges, i.e. users active on the analysis date.
    val activeDF: Dataset[Row] = rngDF.where(" rng_end = '9999-12-31'")

    // Days elapsed between the range start and the analysis date.
    val diffDF: DataFrame = activeDF.selectExpr("guid", s"datediff('$calcDate', rng_start) as days")

    // Explode each user into rows (guid, 1), (guid, 2), ..., (guid, days + 1):
    // a range that started `days` days ago covers days + 1 consecutive active
    // days, both endpoints inclusive.
    val resDF: DataFrame = diffDF.flatMap(row => {
      val guid: Long = row.getAs[String]("guid").toLong
      val days: Int = row.getAs[Int]("days")
      for (i <- 1 to days + 1) yield (guid, i)
    }).toDF("guid", "act_days")

    // createOrReplaceTempView instead of createTempView: the latter throws
    // AnalysisException when the view name is already registered (e.g. when
    // the job is re-run inside the same Spark session).
    resDF.createOrReplaceTempView("act")

    // Aggregate: number of users per consecutive-active-day count.
    val res: DataFrame = spark.sql(
      s"""
        |select
        |"$calcDate" as date,
        |count(guid) as users,
        |act_days
        |from act
        |group by act_days
        |""".stripMargin)
    res.show()

    spark.close()
  }

}
