package ml

import base.BaseModel
import org.apache.spark.ml.clustering.{KMeans, KMeansModel}
import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.sql.{DataFrame, SparkSession}

import scala.collection.immutable

object PSM extends BaseModel {
  /** Spark application name used by the base workflow. */
  override def setAppName: String = "PSM"

  /** Four-level tag id whose five-level children this model assigns. */
  override def setFourId: String = "151"

  /**
   * Computes the PSM (price-sensitivity) tag for every member.
   *
   * Pipeline: derive a per-member psm score from order history with Spark SQL,
   * vectorize it, cluster members into 5 groups with k-means, rank the cluster
   * centers from high to low, join the ranks against the five-level tag rules,
   * and finally map each member's cluster to its tag id via a UDF.
   *
   * @param spark    active session (implicits are imported from it below)
   * @param fiveTagDF five-level tag rules; `rule` holds the expected rank
   * @param HBaseDF  raw order records loaded from HBase
   * @return DataFrame with columns (userId, tagsId)
   */
  override def getNewTag(spark: SparkSession, fiveTagDF: DataFrame, HBaseDF: DataFrame): DataFrame = {
    // needed for toDF / as[...] encoders on local collections
    import spark.implicits._
    spark.sparkContext.setLogLevel("WARN")

    // Step 1: compute each member's psm score from discount/order aggregates.
    HBaseDF.createOrReplaceTempView("HBaseDF")
    val psmScoreDF: DataFrame = spark.sql(
      """
        |SELECT * FROM
        |(SELECT memberId,
        |(discountTimes/purchaseTimes) +
        |((preferentialAmount/discountTimes)/(amountReceived/purchaseTimes))+
        |(preferentialAmount/amountReceived) psm
        |FROM
        |(SELECT memberId,
        |SUM(CASE WHEN couponCodeValue != 0 THEN 1 ELSE 0 END) discountTimes,
        |COUNT(orderSn) purchaseTimes,
        |SUM(couponCodeValue) preferentialAmount,
        |SUM((orderAmount + couponCodeValue)) amountReceived
        |FROM HBaseDF
        |GROUP BY memberId)tmp)tmp1
        |WHERE psm is not null and memberId is not null
        |""".stripMargin).toDF("memberId", "psm")

    // psmScoreDF.show()

    // Step 2: wrap the single psm column into the ML "features" vector column.
    val psmFeatureDF: DataFrame = new VectorAssembler()
      .setInputCols(Array("psm"))
      .setOutputCol("features")
      .transform(psmScoreDF)

    // Step 3: fit a 5-cluster k-means model on the psm feature.
    val kMeansModel: KMeansModel = new KMeans()
      .setK(5)
      .setMaxIter(5)
      .setSeed(10)
      .setFeaturesCol("features")
      .setPredictionCol("prediction")
      .fit(psmFeatureDF)

    // Annotate every member row with its cluster id ("prediction" column).
    val predictedDF: DataFrame = kMeansModel.transform(psmFeatureDF)
    // predictedDF.show(false)

    // Step 4: rank clusters from the highest center sum to the lowest.
    val centerSums = kMeansModel.clusterCenters.toIndexedSeq
      .map(_.toArray.sum)
      .zipWithIndex
      .map { case (sum, cluster) => (cluster, sum) }
    val rankedCenters = centerSums.sortBy(_._2)(Ordering[Double].reverse)

    // Pair each original cluster id with its rank position after sorting.
    val clusterRanks = rankedCenters.zipWithIndex
      .map { case ((cluster, _), rank) => (cluster, rank) }
    val indexDF: DataFrame = clusterRanks.toDF("prediction", "index")

    // Step 5: join the five-level tag rules with the cluster ranks
    // (tag rule value == cluster rank) to get (cluster id -> tag id).
    fiveTagDF.createOrReplaceTempView("fiveTagDF")
    indexDF.createOrReplaceTempView("IndexDF")
    val tagByClusterDF: DataFrame = spark.sql(
      """
        |SELECT I.prediction,f.id
        |FROM fiveTagDF f
        |INNER JOIN IndexDF I
        |ON f.rule=I.index
        |""".stripMargin).toDF("prediction", "id")

    // Collect the mapping driver-side and expose it as a SQL UDF so the final
    // query can translate each cluster id into its tag id.
    val predictionToTag: Map[String, String] = tagByClusterDF.as[(String, String)].collect().toMap
    spark.udf.register("getTags", (prediction: String) => predictionToTag.get(prediction))

    // Step 6: produce the final (userId, tagsId) result via the UDF.
    predictedDF.createOrReplaceTempView("PSMPredict")

    spark.sql(
      """
        |SELECT memberId as userId,
        |getTags(prediction) as tagsId
        |FROM PSMPredict
        |""".stripMargin)

  }

  /** Program entry point: runs the shared tag-computation workflow. */
  def main(args: Array[String]): Unit = exec()
}
