package ml

import base.BaseModel
import org.apache.spark.ml.clustering.{KMeans, KMeansModel}
import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.sql.{DataFrame, SparkSession}

import scala.collection.immutable

object RFE extends BaseModel {

  // Spark application name for this model.
  override def setAppName: String = "RFE"

  // Four-level tag id this model produces tags under.
  override def setFourId: String = "146"

  /**
   * Computes RFE (Recency / Frequency / Engagement) user-value tags.
   *
   * Pipeline:
   *   1. Score each user's R/F/E on a 1-5 scale via SQL CASE buckets.
   *   2. Assemble (r, f, e) into an ML feature vector.
   *   3. Cluster users with KMeans (k = 4).
   *   4. Rank cluster centers by the sum of their coordinates
   *      (higher sum = more valuable segment).
   *   5. Join the rank index against the five-level tag rules.
   *   6. Map each user's cluster prediction to its tag id.
   *
   * @param spark     active SparkSession
   * @param fiveTagDF five-level tag definitions; `rule` holds the expected
   *                  rank index, `id` the tag id
   * @param HBaseDF   raw behaviour logs with at least the columns
   *                  global_user_id, loc_url, log_time
   * @return DataFrame of (userId, tagsId) — one newly computed tag per user
   */
  override def getNewTag(spark: SparkSession, fiveTagDF: DataFrame, HBaseDF: DataFrame): DataFrame = {
    // 1. Compute R/F/E per user and normalize each dimension to a 1-5 score.
    //    Users scoring 0 on any dimension (out-of-range values) are dropped.
    import spark.implicits._
    spark.sparkContext.setLogLevel("WARN")
    HBaseDF.createOrReplaceTempView("HBaseDF")
    val RFE_DF: DataFrame = spark.sql(
      """
        |SELECT * FROM
        |(SELECT global_user_id,
        |CASE WHEN recently BETWEEN 0 AND 15 THEN 5
        |     WHEN recently BETWEEN 16 AND 30 THEN 4
        |     WHEN recently BETWEEN 31 AND 45 THEN 3
        |     WHEN recently BETWEEN 46 AND 60 THEN 2
        |     WHEN recently > 60 THEN 1
        |     ELSE 0 END AS r,
        |CASE WHEN frequency >=400 THEN 5
        |     WHEN frequency BETWEEN 300 AND 399 THEN 4
        |     WHEN frequency BETWEEN 200 AND 299 THEN 3
        |     WHEN frequency BETWEEN 100 AND 199 THEN 2
        |     WHEN frequency <=99 THEN 1
        |     ELSE 0 END AS f,
        |CASE WHEN engagement >=250 THEN 5
        |     WHEN engagement BETWEEN 200 AND 249 THEN 4
        |     WHEN engagement BETWEEN 150 AND 199 THEN 3
        |     WHEN engagement BETWEEN 50 AND 149 THEN 2
        |     WHEN engagement <=49 THEN 1
        |     ELSE 0 END AS e
        |FROM
        |(SELECT global_user_id,
        |DATEDIFF(current_timestamp(),max(log_time)) as recently,
        |count(loc_url) as frequency,
        |count(distinct loc_url) as engagement
        |FROM HBaseDF
        |GROUP BY global_user_id)tmp)tmp1
        |WHERE r!=0 and f!=0 and e!=0
        |""".stripMargin).toDF("userId", "r", "f", "e")

    // 2. Assemble the three scores into a single "features" vector column.
    val RFEFeatures: DataFrame = new VectorAssembler()
      .setInputCols(Array("r", "f", "e"))
      .setOutputCol("features")
      .transform(RFE_DF)

    // 3. Train KMeans and predict a cluster for every user.
    //    Fixed seed keeps the clustering deterministic across runs.
    val kMeansModel: KMeansModel = new KMeans()
      .setK(4)
      .setMaxIter(5)
      .setSeed(10)
      .setFeaturesCol("features")
      .setPredictionCol("prediction")
      .fit(RFEFeatures)

    val RFEPredict: DataFrame = kMeansModel.transform(RFEFeatures)

    // 4. Rank cluster centers by descending coordinate sum: a larger sum of
    //    (r, f, e) center values marks a more valuable user segment.
    val clusterCentersSum: immutable.IndexedSeq[(Int, Double)] =
      for (a <- kMeansModel.clusterCenters.indices)
        yield (a, kMeansModel.clusterCenters(a).toArray.sum)
    val clusterCentersSumSort: immutable.IndexedSeq[(Int, Double)] = clusterCentersSum.sortBy(-_._2)

    // Pair each cluster id with its value rank (0 = most valuable).
    val clusterCentersIndex: immutable.IndexedSeq[(Int, Int)] =
      for (index <- clusterCentersSumSort.indices)
        yield (clusterCentersSumSort(index)._1, index)
    val predictIndexDF: DataFrame = clusterCentersIndex.toDF("prediction", "index")

    // 5. Match five-level tag rules against the value rank to get
    //    (cluster prediction, tag id) pairs.
    fiveTagDF.createOrReplaceTempView("fiveTagDF")
    predictIndexDF.createOrReplaceTempView("predictIndexDF")
    val predictTagsDF: DataFrame = spark.sql(
      """
        |SELECT PI.prediction,f.id
        |FROM fiveTagDF f
        |INNER JOIN predictIndexDF PI
        |ON f.rule=PI.index
        |""".stripMargin).toDF("prediction", "id")

    // 6. BUG FIX: the original returned the raw input HBaseDF, silently
    //    discarding every computed tag. Map each user's cluster prediction
    //    to its tag id and return the (userId, tagsId) result instead.
    RFEPredict.createOrReplaceTempView("RFEPredict")
    predictTagsDF.createOrReplaceTempView("predictTagsDF")
    spark.sql(
      """
        |SELECT P.userId, T.id AS tagsId
        |FROM RFEPredict P
        |INNER JOIN predictTagsDF T
        |ON P.prediction = T.prediction
        |""".stripMargin)
  }

  // Program entry point: delegates to the BaseModel template method.
  def main(args: Array[String]): Unit = {
    exec()
  }
}
