package cn.itcast.tags.models.ml

import cn.itcast.tags.config.ModelConfig
import cn.itcast.tags.models.{AbstractModel, ModelType}
import cn.itcast.tags.tools.TagTools
import cn.itcast.tags.utils.HdfsUtils
import org.apache.hadoop.conf.Configuration
import org.apache.spark.ml.clustering.{KMeans, KMeansModel}
import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.ml.linalg
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.{Column, DataFrame}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.DataTypes

/**
 * RFM customer-value model: scores each member on Recency / Frequency / Monetary,
 * clusters the scores with KMeans (k = 5), ranks the cluster centers, and maps
 * each cluster to a tag id taken from the tag rule DataFrame.
 */
class RfmModel extends AbstractModel("RfmModel", ModelType.ML) {

  /**
   * Computes the RFM tag for every member.
   *
   * @param businessDF order facts with columns: memberid, ordersn, orderamount,
   *                   finishtime (unix seconds) — see sample schema in the repo
   * @param tagDF      tag rules; `TagTools.convertMap` yields rank -> tagId,
   *                   e.g. "0" -> 356, ..., "4" -> 360
   * @return DataFrame(uid, tagId) — one tag per member
   */
  override def doTag(businessDF: DataFrame, tagDF: DataFrame): DataFrame = {

    import businessDF.sparkSession.implicits._

    // Rank (as String "0".."4") -> tag id; rank 0 is the highest-value cluster.
    val ruleMap: Map[String, Long] = TagTools.convertMap(tagDF)
    println(ruleMap.mkString("##"))

    // 1. Aggregate raw orders into per-member R/F/M measures.
    val rfmDF: DataFrame = businessDF
      .groupBy($"memberid")
      .agg(
        max($"finishtime").as("finish_time"),   // latest order completion time
        count($"ordersn").as("frequency"),      // number of orders
        sum($"orderamount".cast(DataTypes.createDecimalType(10, 2))).as("monetary")
      )
      .select(
        $"memberid".as("uid"),
        // days elapsed since the most recent finished order
        datediff(current_timestamp(), from_unixtime($"finish_time")).as("recency"),
        $"frequency",
        $"monetary"
      )

    /**
     * 2. Score each measure on a 1-5 scale:
     * R: 1-3 days = 5, 4-6 = 4, 7-9 = 3, 10-15 = 2, >=16 = 1
     * F: >=200 = 5, 150-199 = 4, 100-149 = 3, 50-99 = 2, 1-49 = 1
     * M: >=200k = 5, 100k-199k = 4, 50k-99k = 3, 10k-49k = 2, <10k = 1
     *
     * NOTE(review): none of these chains has an .otherwise(...), so values
     * outside every range (e.g. recency == 0, frequency == 0, nulls) score
     * null and VectorAssembler will fail on them — confirm inputs cannot
     * produce such rows, or add an explicit default score.
     */
    val rWhen: Column = when($"recency".between(1, 3), 5.0)
      .when($"recency".between(4, 6), 4.0)
      .when($"recency".between(7, 9), 3.0)
      .when($"recency".between(10, 15), 2.0)
      .when($"recency".geq(16), 1.0)
    val fWhen: Column = when(col("frequency").between(1, 49), 1.0)
      .when(col("frequency").between(50, 99), 2.0)
      .when(col("frequency").between(100, 149), 3.0)
      .when(col("frequency").between(150, 199), 4.0)
      .when(col("frequency").geq(200), 5.0)
    val mWhen: Column = when(col("monetary").lt(10000), 1.0)
      .when(col("monetary").between(10000, 49999), 2.0)
      .when(col("monetary").between(50000, 99999), 3.0)
      .when(col("monetary").between(100000, 199999), 4.0)
      .when(col("monetary").geq(200000), 5.0)

    val rfmScoreDF: DataFrame = rfmDF.select(
      $"uid",
      rWhen.as("r_score"),
      fWhen.as("f_score"),
      mWhen.as("m_score")
    )

    // 3. Assemble the three scores into the KMeans feature vector.
    val assembler: VectorAssembler = new VectorAssembler()
      .setInputCols(Array("r_score", "f_score", "m_score"))
      .setOutputCol("features")
    val featuresDF: DataFrame = assembler.transform(rfmScoreDF)

    // 4. Train the KMeans model (k = 5, one cluster per tag rank).
    val kMeansModel: KMeansModel = trainModel(featuresDF)

    // 5. Rank cluster centers by total score, descending: rank 0 = best customers.
    println("Cluster Centers: ")
    val rankedCenters: Array[((Double, Int), Int)] = kMeansModel.clusterCenters
      .zipWithIndex
      .map { case (center, clusterIndex) => (center.toArray.sum, clusterIndex) }
      .sortBy(-_._1)
      .zipWithIndex

    // clusterIndex -> tagId: join each cluster's rank against the rule map.
    // NOTE(review): ruleMap(rank.toString) throws if the tag rules define fewer
    // than 5 ranks — presumably guaranteed by the rule data; verify upstream.
    val clusterIndexWithTagid: Map[Int, Long] = rankedCenters.map {
      case ((_, clusterIndex), rank) => (clusterIndex, ruleMap(rank.toString))
    }.toMap
    clusterIndexWithTagid.foreach(println)

    // FIX: the KMeans `prediction` column is IntegerType; take Int directly
    // instead of declaring a String parameter and relying on an implicit
    // int -> string cast followed by a redundant .toInt round-trip.
    val prediction_to_tag: UserDefinedFunction = udf(
      (prediction: Int) => clusterIndexWithTagid(prediction)
    )

    // 6. Predict each member's cluster and translate it to the tag id.
    val model: DataFrame = kMeansModel.transform(featuresDF)
      .select(
        $"uid",
        prediction_to_tag($"prediction").as("tagId")
      )

    model.printSchema()
    model.show(10, false)

    model
  }

  /**
   * Trains a KMeans model on the assembled RFM feature vectors.
   *
   * k is fixed at 5 to match the five tag ranks in the rule map; the seed is
   * fixed for reproducible cluster assignments across runs.
   *
   * @param featuresDF DataFrame containing a "features" vector column
   * @return the fitted KMeansModel
   */
  def trainModel(featuresDF: DataFrame): KMeansModel = {
    val kmeans: KMeans = new KMeans()
      .setK(5)
      .setSeed(123L)
      .setMaxIter(20)
      .setInitMode("k-means||")
      .setFeaturesCol("features")
      .setPredictionCol("prediction")
    val kMeansModel: KMeansModel = kmeans.fit(featuresDF)

    // Within Set Sum of Squared Errors — lower is tighter clustering.
    // NOTE(review): computeCost is deprecated in Spark 3.x in favour of
    // ClusteringEvaluator; keep while the project targets an older Spark.
    val WSSSE = kMeansModel.computeCost(featuresDF)
    println(s"Within Set Sum of Squared Errors = $WSSSE")
    kMeansModel
  }
}


/** Command-line entry point: executes the RFM model for tag id 355. */
object RfmModel {
  def main(args: Array[String]): Unit =
    new RfmModel().executeModel(355L)
}