package ml

import base.BaseModel
import org.apache.spark.ml.clustering.{KMeans, KMeansModel}
import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.sql.functions.{current_timestamp, from_unixtime, max}
import org.apache.spark.sql.{DataFrame, SparkSession, functions}

import scala.collection.immutable

object RFE2 extends BaseModel {

  // Spark application name.
  override def setAppName: String = "RFE2"

  // Four-level tag id this model computes results for.
  override def setFourId: String = "146"

  /**
   * Builds the RFE (Recency / Frequency / Engagement) user-activity tag.
   *
   * Pipeline:
   *  1. aggregate raw R/F/E metrics per user from the behaviour logs,
   *  2. score each metric on a 1-5 scale so the dimensions are comparable,
   *  3. assemble the scores into a feature vector,
   *  4. cluster users with KMeans (k = 7),
   *  5. rank the clusters by the sum of their centers (higher = more active),
   *  6. map each cluster id to its rank index,
   *  7. join the rank index with the five-level tag rules to get tag ids,
   *  8. attach a tag id to every user via its predicted cluster.
   *
   * @param spark     active session (needed for the implicits import)
   * @param fiveTagDF five-level tag rules; must expose "id" and "rule" columns
   * @param HBaseDF   raw user behaviour logs; must expose "global_user_id",
   *                  "log_time" and "loc_url" columns
   * @return one row per user: userId plus the matched tag id
   */
  override def getNewTag(spark: SparkSession, fiveTagDF: DataFrame, HBaseDF: DataFrame): DataFrame = {
    // Column-name constants (val, not var: never reassigned).
    val recencyStr = "recency"       // days since last visit
    val frequencyStr = "frequency"   // visit count
    val engagementStr = "engagement" // distinct pages visited
    val featuresStr = "features"     // assembled feature vector
    val predictionStr = "prediction" // KMeans cluster id

    // Implicit conversions ('col syntax, .toDF) and SQL built-in functions.
    import spark.implicits._
    import org.apache.spark.sql.functions._

    // 1. Aggregate the raw RFE metrics per user.
    // R: days between now and the user's most recent log entry.
    val getRecency = datediff(current_timestamp(), max("log_time")) as recencyStr
    // F: total number of page visits.
    val getFrequency = count("loc_url") as frequencyStr
    // E: number of distinct pages visited.
    val getEngagement = countDistinct("loc_url") as engagementStr

    val RFEDF: DataFrame = HBaseDF
      .groupBy("global_user_id")
      .agg(getRecency, getFrequency, getEngagement)

    // 2. Score each metric on a 1-5 scale to unify the dimensions.
    // R score: fewer days since last visit -> higher score.
    val getRecencyScore = when(col(recencyStr) >= 0 && col(recencyStr) <= 15, 5)
      .when(col(recencyStr) >= 16 && col(recencyStr) <= 30, 4)
      .when(col(recencyStr) >= 31 && col(recencyStr) <= 45, 3)
      .when(col(recencyStr) >= 46 && col(recencyStr) <= 60, 2)
      .when(col(recencyStr) >= 61, 1)
      .as(recencyStr)

    // F score: more visits -> higher score.
    val getFrequencyScore = when(col(frequencyStr) >= 400, 5)
      .when(col(frequencyStr) >= 300 && col(frequencyStr) <= 399, 4)
      .when(col(frequencyStr) >= 200 && col(frequencyStr) <= 299, 3)
      .when(col(frequencyStr) >= 100 && col(frequencyStr) <= 199, 2)
      .when(col(frequencyStr) <= 99, 1)
      .as(frequencyStr)

    // E score: more distinct pages -> higher score.
    // BUGFIX: the original last branch was `<= 99`, leaving values 100-199
    // unmatched (null score), which would break VectorAssembler downstream.
    // `otherwise(1)` closes the gap while keeping every other bucket intact.
    val getEngagementScore = when(col(engagementStr) >= 250, 5)
      .when(col(engagementStr) >= 230 && col(engagementStr) <= 249, 4)
      .when(col(engagementStr) >= 210 && col(engagementStr) <= 229, 3)
      .when(col(engagementStr) >= 200 && col(engagementStr) <= 209, 2)
      .otherwise(1)
      .as(engagementStr)

    val RFEScore: DataFrame =
      RFEDF.select('global_user_id, getRecencyScore, getFrequencyScore, getEngagementScore)

    // 3. Assemble the three scores into one feature vector.
    // BUGFIX: the original passed frequencyStr twice and never included the
    // engagement column, so the model clustered on the wrong features.
    val RFEFeatures: DataFrame = new VectorAssembler()
      .setInputCols(Array(recencyStr, frequencyStr, engagementStr))
      .setOutputCol(featuresStr)
      .transform(RFEScore)

    // 4. Cluster users into 7 activity groups.
    val kMeansModel: KMeansModel = new KMeans()
      .setK(7)                         // 7 activity levels
      .setMaxIter(5)                   // iteration cap
      .setFeaturesCol(featuresStr)
      .setPredictionCol(predictionStr) // constant instead of a repeated literal
      .fit(RFEFeatures)

    val kMeansPredict: DataFrame = kMeansModel.transform(RFEFeatures)

    // 5. Rank clusters by the sum of their center coordinates, descending:
    // a larger coordinate sum means a more active (higher-value) cluster.
    val clusterCentersSum: immutable.IndexedSeq[(Int, Double)] =
      for (i <- kMeansModel.clusterCenters.indices)
        yield (i, kMeansModel.clusterCenters(i).toArray.sum)
    val clusterCentersSumSort: immutable.IndexedSeq[(Int, Double)] =
      clusterCentersSum.sortBy(_._2).reverse

    // 6. Map each cluster id to its rank index (0 = most active).
    val predictIndex: immutable.IndexedSeq[(Int, Int)] =
      for (i <- clusterCentersSumSort.indices)
        yield (clusterCentersSumSort(i)._1, i)
    val predictDF: DataFrame = predictIndex.toDF(predictionStr, "index")

    // 7. Join the rank index against the five-level tag rules to get tag ids.
    val predictTagIdDF: DataFrame = predictDF
      .join(fiveTagDF, predictDF("index") === fiveTagDF("rule"))
      .select(predictionStr, "id")

    // 8. Attach the tag id to every user through its predicted cluster.
    // BUGFIX: the original returned the raw HBaseDF here, discarding all the
    // work above (predictTagIdDF was computed and never used).
    // NOTE(review): output column names userId/tagsId follow the usual
    // BaseModel tag-result contract — confirm against BaseModel.
    kMeansPredict
      .select('global_user_id as "userId", col(predictionStr))
      .join(predictTagIdDF, predictionStr)
      .select('userId, 'id as "tagsId")
  }

  // Program entry point: delegates to the BaseModel template method.
  def main(args: Array[String]): Unit = {
    exec()
  }
}
