package cn.itcast.tags.models.ml

import cn.itcast.tags.config.ModelConfig
import cn.itcast.tags.models.{AbstractModel, ModelType}
import cn.itcast.tags.tools.{MLModelTools, TagTools}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.ml.clustering.KMeansModel
import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.ml.linalg
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.functions._
/**
 * RFE (Recency / Frequency / Engagements) user-value model.
 *
 * Scores each user on three behavioral dimensions derived from web-access
 * logs, clusters the score vectors with a pre-trained KMeans model, and maps
 * each cluster to a tag id taken from the tag rule table.
 */
class RfeModel extends AbstractModel("", ModelType.ML) {

  /**
   * Compute the RFE tag for every user in the business data.
   *
   * @param businessDF access-log rows; expected columns: global_user_id,
   *                   log_time, loc_url (schema assumed from usage — confirm
   *                   against the upstream loader)
   * @param tagDF      tag rule rows consumed by TagTools.convertMap; rule keys
   *                   are the rank index ("0".."k-1") of each cluster
   * @return DataFrame with columns (uid, tagId)
   */
  override def doTag(businessDF: DataFrame, tagDF: DataFrame): DataFrame = {

    val spark: SparkSession = businessDF.sparkSession
    import spark.implicits._

    // 1. Per-user aggregates: last visit time, total page views, distinct pages.
    val rfeDF: DataFrame = businessDF.groupBy($"global_user_id")
      .agg(
        max($"log_time").as("last_time"),
        // BUG FIX: alias was misspelled "frequencey"; the select below reads
        // "frequency", which made Spark fail with "cannot resolve column".
        count($"loc_url").as("frequency"),
        countDistinct($"loc_url").as("engagements")
      )
      .select(
        $"global_user_id".as("uid"),
        // Recency = days from last visit to a fixed reference point
        // (now - 370 days). NOTE(review): the 370-day offset looks like a
        // workaround for stale demo data — confirm before production use.
        datediff(
          date_sub(current_timestamp(), 370), $"last_time"
        ).as("recency"),
        $"frequency",
        $"engagements"
      )

    // 2. Rule-based scoring (1.0 .. 5.0) for each dimension. Rows falling
    // outside every branch would score null; branch bounds below are
    // contiguous so that cannot happen for non-negative values >= 1.
    // R score: smaller recency (more recent visit) => higher score.
    val rWhen = when(col("recency").between(1, 15), 5.0) //
      .when(col("recency").between(16, 30), 4.0) //
      .when(col("recency").between(31, 45), 3.0) //
      .when(col("recency").between(46, 60), 2.0) //
      .when(col("recency").geq(61), 1.0) //
    // F score: more visits => higher score.
    val fWhen = when(col("frequency").leq(99), 1.0) //
      .when(col("frequency").between(100, 199), 2.0) //
      .when(col("frequency").between(200, 299), 3.0) //
      .when(col("frequency").between(300, 399), 4.0) //
      .when(col("frequency").geq(400), 5.0) //
    // E score: more distinct pages => higher score.
    // BUG FIX: was lt(49), which left engagements == 49 unmatched (null
    // feature value would make VectorAssembler fail at transform time).
    val eWhen = when(col("engagements").leq(49), 1.0) //
      .when(col("engagements").between(50, 149), 2.0) //
      .when(col("engagements").between(150, 199), 3.0) //
      .when(col("engagements").between(200, 249), 4.0) //
      .when(col("engagements").geq(250), 5.0) //
    val rfeScoreDF: DataFrame = rfeDF
      .select($"uid", rWhen.as("r_score"), //
        fWhen.as("f_score"), eWhen.as("e_score")
      )

    // 3. Assemble the three scores into a single "features" vector column.
    val assembler: VectorAssembler = new VectorAssembler()
      .setInputCols(Array("r_score", "f_score", "e_score"))
      .setOutputCol("features")
    val rfeFeaturesDF: DataFrame = assembler.transform(rfeScoreDF)
    // Model is persisted per model class under the configured base path.
    val modelPath: String = ModelConfig.MODEL_BASE_PATH + s"/${this.getClass.getSimpleName}"

    // 4. Load (or train-and-cache, per MLModelTools semantics) the KMeans
    // model and predict a cluster for every user.
    val kMeansModel: KMeansModel = MLModelTools
      .loadModel(rfeFeaturesDF, "kmeans", modelPath).asInstanceOf[KMeansModel]

    val predictionDF: DataFrame = kMeansModel.transform(rfeFeaturesDF)

    // 5. Rank clusters by the sum of their center coordinates (higher sum =
    // more valuable users). clusterIndexArray: ((centerSum, clusterIndex), rank).
    val clusterCenters: Array[linalg.Vector] = kMeansModel.clusterCenters
    val clusterIndexArray: Array[((Double, Int), Int)] = clusterCenters
      .zipWithIndex
      .map {
        case (vector, clusterIndex) => {
          (vector.toArray.sum, clusterIndex)
        }
      }
      .sortBy(-_._1)
      .zipWithIndex

    // 6. Map each raw cluster index to the tag id of its rank. The rule map
    // is keyed by the rank index as a string ("0" = best cluster).
    val ruleMap: Map[String, Long] = TagTools.convertMap(tagDF)
    val clusterTagMap: Map[Int, Long] = clusterIndexArray.map {
      case ((_: Double, clusterIndex: Int), index: Int) => {
        (clusterIndex, ruleMap(index.toString))
      }
    }.toMap

    // Broadcast the small lookup map so the UDF does not capture the driver map
    // per task serialization.
    val clusterTagMapBroadcast: Broadcast[Map[Int, Long]] = spark.sparkContext.broadcast(clusterTagMap)

    val prediction_to_Tag: UserDefinedFunction = udf(
      (prediction: Int) => {
        clusterTagMapBroadcast.value(prediction)
      }
    )

    // 7. Final result: one (uid, tagId) row per user.
    val model: DataFrame = predictionDF.select(
      $"uid",
      prediction_to_Tag($"prediction").as("tagId")
    )
    model
  }

}
/** Entry point: runs the RFE model pipeline (tagId 0L, non-verbose). */
object RfeModel {
  def main(args: Array[String]): Unit =
    new RfeModel().executeModel(0L, false)
}

