package cn.itcast.tags.models.statistics

import cn.itcast.tags.models.{AbstractModel, ModelType}
import cn.itcast.tags.tools.TagTools
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.types.StringType

class ConsumeCycleModel2 extends AbstractModel("ConsumeCycleModel2", ModelType.STATISTICS) {

  /**
   * Computes the "consume cycle" statistical tag: for each member, how many
   * days have elapsed since their most recent completed order, bucketed into
   * the day ranges declared by the attribute tags.
   *
   * @param businessDF order data; columns used: `memberid`, `finishtime`
   *                   (unix epoch seconds, per the sample output below)
   * @param tagDF      tag metadata, parsed by [[TagTools.convertTuple]] into
   *                   (tagId, start, end) day ranges, e.g. tag 342 covers 0-7 days
   * @return DataFrame with columns (`uid`, `tagId`) where `tagId` is a String
   */
  override def doTag(businessDF: DataFrame, tagDF: DataFrame): DataFrame = {

    import businessDF.sparkSession.implicits._
    import org.apache.spark.sql.functions._

    /**
     * Attribute-tag rules as inclusive day ranges:
     * +-----+-----+---+
     * |tagId|start|end|
     * +-----+-----+---+
     * |342  |0    |7  |
     * |343  |8    |14 |
     */
    val attrTagDF: DataFrame = TagTools.convertTuple(tagDF)

    attrTagDF.printSchema()
    attrTagDF.show(10, false)

    /**
     * Per member: take the latest order finish time (unix seconds), convert it
     * to a timestamp, then count whole days elapsed up to "now".
     * +---+----+
     * |uid|days|
     * +---+----+
     * |1  |75  |
     */
    val daysDF: DataFrame = businessDF
      .groupBy($"memberid")
      .agg(from_unixtime(max($"finishtime")).as("finish_time"))
      .select(
        $"memberid".as("uid"),
        datediff(current_timestamp(), $"finish_time").as("days")
      )

    daysDF.printSchema()
    daysDF.show(10, false)

    // FIX: the original used join() with no condition followed by where(...),
    // which Spark plans as an implicit cartesian product — on Spark 2.x this
    // fails with "AnalysisException: Detected implicit cartesian product"
    // unless spark.sql.crossJoin.enabled is set. Supplying the range predicate
    // as the join condition produces the same rows without that flag and lets
    // the optimizer use it directly.
    val modelDF: DataFrame = daysDF
      .join(attrTagDF, daysDF("days").between(attrTagDF("start"), attrTagDF("end")))
      .select(
        $"uid",
        $"tagId".cast(StringType)
      )

    modelDF.printSchema()
    modelDF.show(10, false)

    // NOTE(review): count() forces a second full evaluation of the plan;
    // kept only for parity with the original debug output — consider removing
    // (or caching modelDF) in production.
    println(modelDF.count())

    modelDF
  }
}

object ConsumeCycleModel2 {

  /** Entry point: runs the consume-cycle model for tag id 341 (second flag false, as in the original call). */
  def main(args: Array[String]): Unit = {
    val model = new ConsumeCycleModel2()
    model.executeModel(341L, false)
  }
}

