package cn.itcast.tags.models.statistics
import cn.itcast.tags.models.{AbstractModel, ModelType}
import cn.itcast.tags.tools.TagTools
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.types.StringType
/**
 * Statistical tag model: "consume cycle" — buckets each member by the number
 * of days since their most recent finished order, then matches that day count
 * against the attribute-tag ranges defined in the tag metadata.
 */
class ConsumeCycleModel extends AbstractModel("ConsumeCycleModel", ModelType.STATISTICS) {

  /**
   * Computes the consume-cycle tag for every member.
   *
   * @param businessDF order data; must contain `memberid` and `finishtime` columns
   * @param tagDF      tag metadata; attribute tags carry a rule parsed by
   *                   [[TagTools.convertTuple]] into (tagId, start, end) ranges
   * @return DataFrame with columns `uid` and `tagId` (tagId cast to string)
   */
  override def doTag(businessDF: DataFrame, tagDF: DataFrame): DataFrame = {
    import businessDF.sparkSession.implicits._
    import org.apache.spark.sql.functions._

    // Attribute tags as (tagId, start, end) day ranges.
    val attrTagDF: DataFrame = TagTools.convertTuple(tagDF)

    val daysDF: DataFrame = businessDF
      // i. Per member, keep only the most recent order (max finishtime).
      .groupBy($"memberid")
      .agg(max($"finishtime").as("max_finishtime"))
      // ii. Convert finishtime to a timestamp and capture "now" for the diff.
      //     NOTE(review): assumes finishtime is a unix epoch in SECONDS — confirm upstream schema.
      .select(
        $"memberid".as("uid"),
        from_unixtime($"max_finishtime").as("finish_time"),
        current_timestamp().as("now_time")
      )
      // iii. Days elapsed since the last finished order.
      .select(
        $"uid",
        datediff($"now_time", $"finish_time").as("consumer_days")
      )

    // Explicit crossJoin: the original unconditioned join() is an implicit cartesian
    // product, which Spark >= 2.0 rejects with AnalysisException unless
    // spark.sql.crossJoin.enabled=true. crossJoin states the intent unambiguously.
    // The where() then keeps only the (member, tag) pairs whose day count falls
    // inside the tag's [start, end] range.
    val modelDF: DataFrame = daysDF
      .crossJoin(attrTagDF)
      .where($"consumer_days".between(attrTagDF("start"), attrTagDF("end")))
      .select(
        $"uid",
        // tagId is emitted as a string to match the tag system's storage schema.
        $"tagId".cast(StringType)
      )
    modelDF
  }
}
/** Entry point: runs the consume-cycle model for tag id 341. */
object ConsumeCycleModel {
  def main(args: Array[String]): Unit =
    new ConsumeCycleModel().executeModel(341L)
}