package cn.itcast.tags.models.ml

import cn.itcast.tags.config.ModelConfig
import cn.itcast.tags.models.{AbstractModel, ModelType}
import cn.itcast.tags.tools.{MLModelTools, TagTools}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.ml.{Model, linalg}
import org.apache.spark.ml.clustering.KMeansModel
import org.apache.spark.ml.feature.{MinMaxScaler, MinMaxScalerModel, VectorAssembler}
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.functions._


/**
 * User-activity tag model based on RFE scoring (Recency / Frequency / Engagements)
 * clustered with a pre-trained KMeans model.
 *
 * Input business data columns: global_user_id | loc_url | log_time
 */
class RfeModel extends AbstractModel("用户活跃度",ModelType.ML){
  /**
   * Computes per-user R/F/E scores, assembles them into a feature vector,
   * predicts a cluster with a persisted KMeans model, and maps each cluster
   * index to its tag value.
   *
   * @param businessDF raw web-log data (global_user_id, loc_url, log_time)
   * @param tagDF      tag metadata used to map cluster indices to tag names
   * @return DataFrame with columns (userId, rfe) — one tag row per user
   */
  override def doTag(businessDF: DataFrame, tagDF: DataFrame): DataFrame = {
    import businessDF.sparkSession.implicits._
    val session: SparkSession = businessDF.sparkSession

    /*
     * R (recency):     days since the user's last visit  -> max(log_time) + datediff
     * F (frequency):   total page views (PV)             -> count(loc_url)
     * E (engagements): distinct pages visited (UV)       -> countDistinct(loc_url)
     */
    val rfeDF: DataFrame = businessDF
      .groupBy($"global_user_id")
      .agg(
        max($"log_time").as("last_time"),
        count($"loc_url").as("frequency"), // F: page views
        countDistinct($"loc_url").as("engagements") // E: distinct pages
      )
      .select(
        $"global_user_id".as("userId"),
        // NOTE(review): the 850-day offset shifts "today" back so the stale
        // sample data produces sensible recency values — confirm for production.
        datediff(
          date_sub(current_timestamp(), 850), $"last_time"
        ).as("recency"), // R
        $"frequency",
        $"engagements"
      )

    /* Scoring rubric:
       R: 0-15 days = 5, 16-30 = 4, 31-45 = 3, 46-60 = 2, >=61 = 1
       F: >=400 = 5, 300-399 = 4, 200-299 = 3, 100-199 = 2, <=99 = 1
       E: >=250 = 5, 200-249 = 4, 150-199 = 3, 50-149 = 2, <=49 = 1
     */
    // Fixed: rubric starts at 0 days, original code used between(1, 15) and
    // left recency == 0 (visited today) with a null score.
    val rWhen=when(col("recency").between(0,15),5.0)
      .when(col("recency").between(16,30),4.0)
      .when(col("recency").between(31,45),3.0)
      .when(col("recency").between(46,60),2.0)
      .when(col("recency").geq(61),1.0)

    val fWhen=when(col("frequency").geq(400),5.0)
      .when(col("frequency").between(300,399),4.0)
      .when(col("frequency").between(200,299),3.0)
      .when(col("frequency").between(100,199),2.0)
      .when(col("frequency").leq(99),1.0)

    val eWhen=when(col("engagements").geq(250),5.0)
      .when(col("engagements").between(200,249),4.0)
      .when(col("engagements").between(150,199),3.0)
      .when(col("engagements").between(50,149),2.0)
      .when(col("engagements").leq(49),1.0)

    val rfeScoreDF: DataFrame = rfeDF.select(
      $"userId",
      rWhen.as("r_score"),
      fWhen.as("f_score"),
      eWhen.as("e_score")
    )

    // Assemble the three scores into a single "features" vector column.
    val assembler: VectorAssembler = new VectorAssembler()
      .setInputCols(Array("r_score", "f_score", "e_score"))
      .setOutputCol("features")
    val rfeFeaturesDF: DataFrame = assembler.transform(rfeScoreDF)

    // Load (or train-and-cache) the KMeans model keyed by "rfe" and this class.
    val kMeansModel: KMeansModel = MLModelTools.loadModel(rfeFeaturesDF,
      "rfe", this.getClass).asInstanceOf[KMeansModel]

    // Predict: adds a "prediction" column holding the cluster-center index.
    val predictionDF: DataFrame = kMeansModel.transform(rfeFeaturesDF)
    predictionDF.show(10,false)

    // Cluster centers, used to rank clusters and map index -> tag name.
    val centers: Array[linalg.Vector] = kMeansModel.clusterCenters
    centers.foreach(println)

    val indexTagMap: Map[Int, String] = TagTools.convertIndexMap(centers, tagDF)

    // Broadcast the small index->tag map and attach tags via a UDF.
    val indexTagMapBroadcast: Broadcast[Map[Int, String]] = session.sparkContext.broadcast(indexTagMap)
    val index_to_tag = udf(
      (clusterIndex:Int)=>indexTagMapBroadcast.value(clusterIndex)
    )
    val modelDF: DataFrame = predictionDF.select(
      $"userId",
      index_to_tag($"prediction").as("rfe")
    )

    modelDF.show(100,false)

    // Fixed: original returned null, discarding the computed tags; the
    // framework expects the tagged DataFrame back from doTag.
    modelDF
  }
}

/** Entry point: runs the RFE model for tag id 367 without validation. */
object RfeModel {
  def main(args: Array[String]): Unit =
    new RfeModel().executeModel(367L, isHive = false)
}
