package ml

import base.BaseModel
import org.apache.spark.ml.clustering.{KMeans, KMeansModel}
import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.sql.{DataFrame, SparkSession, functions}

import scala.collection.immutable

/**
  * RFM (Recency / Frequency / Monetary) user-value model.
  *
  * For every member, the model aggregates order history into the three RFM
  * metrics, maps each metric onto a 1-5 score, clusters the scored users with
  * KMeans (7 clusters), ranks the clusters by the sum of their centre
  * coordinates (higher sum = more valuable segment), and joins that ranking
  * against the five-level tag metadata to emit the final (userId, tagsId) pairs.
  */
object RFM extends BaseModel {
  def main(args: Array[String]): Unit = {
    exec()
  }

  // Spark application name.
  override def setAppName: String = "RFM"

  // Tag id of the RFM user-value model (level-4 tag).
  override def setFourId: String = "138"

  /**
    * Computes the RFM value tag for every member.
    *
    * A member may have many orders, so all metrics are aggregated per
    * `memberId`.
    *
    * @param spark     active SparkSession
    * @param fiveTagDF level-5 tag metadata; expected columns include `id`
    *                  (tag id) and `rule` (rank index, "0" = most valuable)
    * @param HBaseDF   raw order data; expected columns include `memberId`,
    *                  `finishTime` (unix seconds), `orderSn`, `orderAmount`
    * @return DataFrame with columns `userId` and `tagsId`
    */
  override def getNewTag(spark: SparkSession, fiveTagDF: DataFrame, HBaseDF: DataFrame): DataFrame = {
    // Column-name constants used throughout the pipeline.
    val recencyStr = "recency"       // days since the member's most recent order
    val frequencyStr = "frequency"   // number of orders
    val moneyTotalStr = "moneyTotal" // total order amount

    val features = "features"        // assembled feature-vector column
    val prediction = "prediction"    // KMeans output column

    // Implicit conversions ($"col", 'col, .toDF on local collections).
    import spark.implicits._
    // Spark SQL built-in functions (when, col, max, ...).
    import org.apache.spark.sql.functions._

    // R: days between now and the latest finished order.
    // NOTE(review): the fixed -300 offset looks like a calibration for the
    // sample data's timestamps — confirm against the data source.
    val getRecency = functions.datediff(current_timestamp(), from_unixtime(max("finishTime"))) - 300 as recencyStr
    // F: order count.
    val getFrequency = functions.count("orderSn") as frequencyStr
    // M: total spend.
    val getMoney = functions.sum("orderAmount") as moneyTotalStr

    val getRFM: DataFrame = HBaseDF.groupBy("memberId").agg(getRecency, getFrequency, getMoney)

    // The raw R/F/M metrics have different scales; normalise each onto a
    // 1-5 score (unmatched rows fall through to null, i.e. score absent).

    // R score: fewer days since last order => higher score.
    val getRecencyScore = functions.when((col(recencyStr) >= 1) && (col(recencyStr) <= 3), 5)
      .when((col(recencyStr) >= 4) && (col(recencyStr) <= 6), 4)
      .when((col(recencyStr) >= 7) && (col(recencyStr) <= 9), 3)
      .when((col(recencyStr) >= 10) && (col(recencyStr) <= 15), 2)
      .when(col(recencyStr) >= 16, 1).as(recencyStr)

    // F score: more orders => higher score.
    val getFrequencyScore = functions.when(col(frequencyStr) >= 200, 5)
      .when((col(frequencyStr) >= 150) && (col(frequencyStr) <= 199), 4)
      .when((col(frequencyStr) >= 100) && (col(frequencyStr) <= 149), 3)
      .when((col(frequencyStr) >= 50) && (col(frequencyStr) <= 99), 2)
      .when((col(frequencyStr) >= 1) && (col(frequencyStr) <= 49), 1).as(frequencyStr)

    // M score: higher spend => higher score.
    // BUG FIX: the lowest band previously tested the frequency column
    // (col(frequencyStr) <= 9999) instead of the money column.
    val getMoneyScore = functions.when(col(moneyTotalStr) >= 200000, 5)
      .when((col(moneyTotalStr) >= 100000) && (col(moneyTotalStr) <= 199999), 4)
      .when((col(moneyTotalStr) >= 50000) && (col(moneyTotalStr) <= 99999), 3)
      .when((col(moneyTotalStr) >= 10000) && (col(moneyTotalStr) <= 49999), 2)
      .when(col(moneyTotalStr) <= 9999, 1).as(moneyTotalStr)

    // Per-member R/F/M scores.
    val getRFMScoreDF: DataFrame = getRFM.select('memberId, getRecencyScore, getFrequencyScore, getMoneyScore)

    // Assemble the three scores into a single feature vector for KMeans.
    val RFMFeaturesDF: DataFrame = new VectorAssembler()
      .setInputCols(Array(recencyStr, frequencyStr, moneyTotalStr))
      .setOutputCol(features)
      .transform(getRFMScoreDF)

    // Cluster users into 7 value segments.
    val kMeansModel: KMeansModel = new KMeans()
      .setK(7)                    // 7 user-value classes
      .setMaxIter(5)              // iteration cap
      .setFeaturesCol(features)   // input feature vector
      .setPredictionCol(prediction) // cluster-label output column
      .fit(RFMFeaturesDF)

    val RFM_predict: DataFrame = kMeansModel.transform(RFMFeaturesDF)

    // The cluster labels 0..6 carry no ordering, so rank the clusters:
    // the sum of a centre's coordinates approximates that segment's value.
    // kMeansModel.clusterCenters.indices — cluster indices
    // kMeansModel.clusterCenters(i)      — the i-th centre vector
    val clusterCentersSum: immutable.IndexedSeq[(Int, Double)] =
      for (i <- kMeansModel.clusterCenters.indices)
        yield (i, kMeansModel.clusterCenters(i).toArray.sum)

    // Sort clusters by value (ascending here; position in the sorted
    // sequence becomes the rank index joined against the tag rules).
    val clusterCentersSumSort: immutable.IndexedSeq[(Int, Double)] = clusterCentersSum.sortBy(_._2)

    // Pair each cluster label with its rank index, e.g. (4, 0) means
    // cluster 4 is ranked 0.
    val clusterCenterIndex: immutable.IndexedSeq[(Int, Int)] =
      for (a <- clusterCentersSumSort.indices) yield (clusterCentersSumSort(a)._1, a)

    // Join the (cluster, rank) table against the tag metadata on rank == rule.
    val clusterCenterIndexDF: DataFrame = clusterCenterIndex.toDF(prediction, "index")
    val predict_ruleDF: DataFrame =
      clusterCenterIndexDF.join(fiveTagDF, fiveTagDF("rule") === clusterCenterIndexDF("index"))

    // Columns: prediction (cluster label) -> id (tag id).
    val predictIdDF: DataFrame = predict_ruleDF.select(prediction, "id")

    // Collect the mapping to the driver so a UDF can translate labels to tags.
    val predictIdMap: Map[String, String] = predictIdDF.as[(String, String)].collect().toMap

    // UDF: cluster label -> tag id (None/null when the label is unmapped).
    val getTagId = udf((prediction: String) => {
      predictIdMap.get(prediction)
    })

    // Final output: one (userId, tagsId) row per member.
    val newTags: DataFrame = RFM_predict.select('memberId as ("userId"), getTagId('prediction).as("tagsId"))

    newTags
  }

}
