package cn.itcast.model.statistics

import cn.itcast.model.base.BaseModel
import org.apache.spark.sql.{Column, DataFrame, Dataset, Row}

object BirthdayModel extends BaseModel {
  // NOTE: these imports intentionally live at OBJECT scope, not inside
  // importSparkEnv(). Scala imports are lexical and cannot be deferred to a
  // method call; placing them here makes the implicit Symbol->Column
  // conversions (spark.implicits._) and SQL functions such as regexp_replace
  // (functions._) visible to computeTag below. The original file had them
  // indented under importSparkEnv(), which was misleading — the `{}` body had
  // already closed.
  import spark.implicits._
  import org.apache.spark.sql.functions._

  /** Spark application name for this model (was "BaseModel" — copy/paste bug). */
  override def setAppName(): String = "BirthdayModel"

  /** No-op hook: the environment imports live at object scope above. */
  override def importSparkEnv(): Unit = {}

  /** Id of the four-level "birthday" tag in the tag metadata table. */
  override def setTagId(): Int = 395

  /**
   * Tags each user by matching the normalized birthday against the five-level
   * rule ranges.
   *
   * @param fiveRuleDF  rule rows with columns `id` (tag id, Int) and `rule`
   *                    (a "start-end" range, presumably "yyyyMMdd-yyyyMMdd" —
   *                    TODO confirm against the rule table)
   * @param hbaseSource user rows with columns `id` and `birthday`
   *                    (dash-separated date, e.g. "yyyy-MM-dd")
   * @return DataFrame with columns `userId` and `tagIds`
   */
  override def computeTag(fiveRuleDF: DataFrame, hbaseSource: DataFrame): DataFrame = {
    // Strip the dashes so birthdays compare lexicographically with the
    // compact rule boundaries ("1990-05-01" -> "19900501").
    val birthday: Column = regexp_replace(hbaseSource.col("birthday"), "-", "")
    val birthdayDF: DataFrame = hbaseSource.select('id.as("userId"), birthday.as("birthday"))

    // Split each rule "start-end" into two bound columns. This stays
    // distributed: the original collect()-to-driver round trip was an
    // unnecessary bottleneck (and the extra .toString on an already-String
    // rule was redundant).
    val fiveDF: DataFrame = fiveRuleDF.map { row =>
      val tagId: String = row.getAs[Int]("id").toString
      val Array(start, end) = row.getAs[String]("rule").split("-")
      (tagId, start, end)
    }.toDF("tagIds", "start", "end")

    // Express the range predicate as the join condition instead of an
    // unconditioned cross join followed by where() — same resulting rows,
    // clearer intent for both readers and the optimizer.
    birthdayDF
      .join(fiveDF, 'birthday.between('start, 'end))
      .select('userId, 'tagIds)
  }

  /**
   * Entry point: runs the full model pipeline and persists the result.
   * Per the original note: while testing, comment out the persistence code
   * inside executeModel(), otherwise the run above will fail.
   */
  def main(args: Array[String]): Unit = {
    val newDF: DataFrame = executeModel()
    saveData(newDF)
  }
}
