package ml

import base.BaseModel
import org.apache.spark.ml.clustering.{KMeans, KMeansModel}
import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.sql.{DataFrame, SparkSession}

import scala.collection.immutable

object RFM2 extends BaseModel {
  // Spark application name for this job.
  override def setAppName: String = "RFM2"

  // Four-level tag id this model computes values for.
  override def setFourId: String = "138"

  /**
   * Computes RFM-based value tags for every user.
   *
   * Pipeline:
   *   1. Aggregate per-user Recency / Frequency / Monetary from order records
   *      and map each dimension to a 0-5 score via business rules.
   *   2. Assemble the three scores into a feature vector.
   *   3. Cluster users into 7 groups with KMeans.
   *   4. Rank clusters by the sum of their center coordinates
   *      (higher sum = more valuable cluster).
   *   5. Join the cluster ranking against the five-level tag rules
   *      (loaded from MySQL) to assign a tag id to each user.
   *
   * @param spark     active SparkSession
   * @param fiveTagDF five-level tag rules with columns (id, rule); `rule` is the
   *                  cluster rank this tag corresponds to
   * @param HBaseDF   raw order data with columns memberId, finishTime, orderSn, orderAmount
   * @return DataFrame(userId, tagsId) — one computed tag per user
   */
  override def getNewTag(spark: SparkSession, fiveTagDF: DataFrame, HBaseDF: DataFrame): DataFrame = {
    // Implicit conversions (Seq -> DataFrame via toDF, etc.)
    import spark.implicits._
    // Spark SQL built-in functions
    import org.apache.spark.sql.functions._

    HBaseDF.createOrReplaceTempView("HBaseDF")

    // 1. Per-user RFM aggregation, then score each dimension by rule.
    //    recency  : days since the user's most recent order (offset by -300 —
    //               NOTE(review): presumably compensates for stale sample data; confirm)
    //    frequency: number of orders
    //    moneyTotal: total order amount
    val USER_RFM_GRADE: DataFrame = spark.sql(
      """
        |SELECT memberId,
        |CASE WHEN recency between 1 and 3 THEN 5
        |     WHEN recency between 4 and 6 THEN 4
        |     WHEN recency between 7 and 9 THEN 3
        |     WHEN recency between 10 and 15 THEN 2
        |     WHEN recency>=16 THEN 1
        |     ELSE 0 END as recency,
        |CASE WHEN frequency>=200 THEN 5
        |     WHEN frequency between 150 and 199 THEN 4
        |     WHEN frequency between 100 and 149 THEN 3
        |     WHEN frequency between 50 and 99 THEN 2
        |     WHEN frequency between 1 and 49 THEN 1
        |     ELSE 0 END as frequency,
        |CASE WHEN moneyTotal>=200000 THEN 5
        |     WHEN moneyTotal between 100000 and 199999 THEN 4
        |     WHEN moneyTotal between 50000 and 99999 THEN 3
        |     WHEN moneyTotal between 10000 and 49999 THEN 2
        |     WHEN moneyTotal<=9999 THEN 1
        |     ELSE 0 END  as moneyTotal
        |FROM
        |(SELECT memberId,
        |DATEDIFF(current_timestamp(),from_unixtime(max(finishTime)))-300 as recency,
        |COUNT(orderSn) as frequency,
        |SUM(orderAmount) as moneyTotal
        |FROM HBaseDF
        |GROUP BY memberId)  AS tmp_group
        |""".stripMargin).toDF("memberId", "r_score", "f_score", "m_score")

    // 2. Assemble the three scores into a single "features" vector column.
    val USER_FEATURE: DataFrame = new VectorAssembler()
      .setInputCols(Array("r_score", "f_score", "m_score"))
      .setOutputCol("features")
      .transform(USER_RFM_GRADE)

    // 3. Train KMeans: 7 clusters, 5 iterations max.
    val USER_K_M: KMeansModel = new KMeans()
      .setK(7)
      .setMaxIter(5)
      .setFeaturesCol("features")
      .setPredictionCol("prediction")
      .fit(USER_FEATURE)

    // Apply the trained model: adds a "prediction" (cluster id) column per user.
    val USER_PREDICT: DataFrame = USER_K_M.transform(USER_FEATURE)

    // 4. Value of each cluster = sum of its center's coordinates.
    val centersSum: immutable.IndexedSeq[(Int, Double)] =
      for (i <- USER_K_M.clusterCenters.indices)
        yield (i, USER_K_M.clusterCenters(i).toArray.sum)

    // Sort clusters by value, highest first.
    val centersSorted: immutable.IndexedSeq[(Int, Double)] = centersSum.sortBy(_._2).reverse

    // Pair each cluster id with its rank so it can be joined to the MySQL rules.
    val centersIndex: immutable.IndexedSeq[(Int, Int)] =
      for (rank <- centersSorted.indices)
        yield (centersSorted(rank)._1, rank)

    // 5. Join cluster ranks with the five-level tag rules.
    val CentersIndexDF: DataFrame = centersIndex.toDF("predict", "index")
    CentersIndexDF.createOrReplaceTempView("CentersIndexDF")
    fiveTagDF.createOrReplaceTempView("fiveTagDF")

    // Map cluster id -> tag id via the rule's rank.
    val USER_P_ID: DataFrame = spark.sql(
      """
        |SELECT c.predict,f.id
        |FROM CentersIndexDF c
        |INNER JOIN fiveTagDF f
        |ON c.index=f.rule
        |""".stripMargin).toDF("predict", "id")

    USER_P_ID.createOrReplaceTempView("USER_P_ID")
    USER_PREDICT.createOrReplaceTempView("USER_PREDICT")

    // Assign each user the tag id of their cluster.
    val USER_TAGS: DataFrame = spark.sql(
      """
        |SELECT u_p.memberId AS userId,
        |u_p_id.id AS tagsId
        |FROM USER_PREDICT u_p
        |INNER JOIN USER_P_ID u_p_id
        |ON u_p.prediction=u_p_id.predict
        |""".stripMargin).toDF("userId", "tagsId")

    // FIX: previously returned the raw input HBaseDF, discarding all computed
    // tags. Return the actual result of the pipeline.
    USER_TAGS
  }

  // Program entry point.
  def main(args: Array[String]): Unit = {
    exec()
  }
}
