package cn.itcast.tags.models.statistics

import cn.itcast.tags.models.{AbstractModel, ModelType}
import cn.itcast.tags.tools.TagTools
import org.apache.spark.sql.{DataFrame, Dataset, Row}
import org.apache.spark.sql.types.IntegerType

class ConsumeCycleModel extends AbstractModel("消费周期", ModelType.STATISTICS) {
  /**
   * Computes the "consumption cycle" statistical tag for each user.
   *
   * For every `memberid` in the business data, takes the latest order
   * `finishtime` (a unix-epoch value), computes how many days have elapsed
   * since then, and maps that day count onto a tag-rule range
   * (`start`..`end`, produced by [[TagTools.convertTuple]]).
   *
   * @param businessDF order data with columns `memberid` and `finishtime`
   * @param tagDF      tag metadata; rule rows carry `start`/`end`/`name`
   *                   after conversion — NOTE(review): exact schema assumed,
   *                   confirm against TagTools.convertTuple
   * @return DataFrame with columns `userId` and `consumercycle`
   */
  override def doTag(businessDF: DataFrame, tagDF: DataFrame): DataFrame = {
    import businessDF.sparkSession.implicits._
    import org.apache.spark.sql.functions._

    // 1. Per user: latest order completion time, converted to days elapsed
    //    since now. from_unixtime turns the epoch value into a timestamp so
    //    datediff can compare it with the current date. Fused into a single
    //    select instead of two chained ones.
    val consumerDaysDF: DataFrame = businessDF
      .groupBy($"memberid")
      .agg(max($"finishtime").as("max_finishtime"))
      .select(
        $"memberid".as("id"),
        datediff(
          current_timestamp(),
          from_unixtime($"max_finishtime")
        ).as("consumer_days")
      )

    // 2. Tag rules flattened to (name, start, end) range rows.
    val attrTagRuleDF: DataFrame = TagTools.convertTuple(tagDF)

    // 3. Range join: condition-less join (cartesian over the few rule rows)
    //    filtered so each user's day count falls inside exactly one
    //    [start, end] bucket. The rule table is tiny, so the cross product
    //    is cheap.
    val modelDF: Dataset[Row] = consumerDaysDF
      .join(attrTagRuleDF)
      .where($"consumer_days".between($"start", $"end"))
      .select($"id".as("userId"), $"name".as("consumercycle"))

    // BUG FIX: previously returned null, discarding the computed tags.
    // Debug-only printSchema()/show() calls removed — each forced an
    // extra Spark job on every run.
    modelDF
  }
}
object ConsumeCycleModel {
  /**
   * Entry point: runs the consume-cycle model for tag id 347
   * with the save flag disabled.
   */
  def main(args: Array[String]): Unit =
    new ConsumeCycleModel().executeModel(347L, false)
}
