package com.timeriver.cases.clustering

import org.apache.spark.ml.Pipeline
import org.apache.spark.ml.clustering.KMeans
import org.apache.spark.ml.evaluation.ClusteringEvaluator
import org.apache.spark.ml.feature.{StringIndexer, VectorAssembler}
import org.apache.spark.ml.param.ParamMap
import org.apache.spark.ml.tuning.{CrossValidator, CrossValidatorModel, ParamGridBuilder}
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.DoubleType

/**
  * K-means clustering demo on the iris dataset.
  *
  * The dataset is small, so the label-agreement rate printed at the end is
  * low, while the silhouette coefficient is close to 1. Clustering is
  * unsupervised learning: it is rarely used for prediction and is mostly
  * used to inspect the distribution of high-dimensional data.
  */
object KmeansAlgCSV2Redis {

  /** Default iris CSV location used when no path argument is supplied. */
  private val DefaultDataPath: String =
    "D:\\workspace\\gitee_space\\spark-ml-machine-learning\\data\\iris.csv"

  def main(args: Array[String]): Unit = {
    val session: SparkSession = SparkSession.builder()
      .appName("聚类算法案例")
      .master("local[6]")
      .getOrCreate()

    // Allow the data path to be passed on the command line instead of
    // relying solely on the hard-coded absolute Windows path.
    val dataPath: String = args.headOption.getOrElse(DefaultDataPath)

    val df: DataFrame = session.read
      .csv(dataPath)
      .toDF("SepalLength", "SepalWidth", "PetalLength", "PetalWidth", "class")

    df.printSchema()

    // CSV columns are read as strings; cast the four feature columns to
    // Double so VectorAssembler can consume them.
    val transformData: DataFrame = df.withColumn("SepalLength", col("SepalLength").cast(DoubleType))
      .withColumn("SepalWidth", col("SepalWidth").cast(DoubleType))
      .withColumn("PetalLength", col("PetalLength").cast(DoubleType))
      .withColumn("PetalWidth", col("PetalWidth").cast(DoubleType))

    transformData.show(5, false)
    transformData.printSchema()

    // Assemble the four numeric columns into a single "features" vector.
    val data: DataFrame = new VectorAssembler()
      .setInputCols(Array("SepalLength", "SepalWidth", "PetalLength", "PetalWidth"))
      .setOutputCol("features")
      .transform(transformData)

    // Fixed seed so the train/test split is reproducible across runs.
    val Array(train, test) = data.randomSplit(Array(0.8, 0.2), seed = 42L)

    // Index the string class column into a numeric "label". K-means itself
    // ignores the label; it is only used for the agreement report below.
    val indexer: StringIndexer = new StringIndexer()
      .setInputCol("class")
      .setOutputCol("label")

    val kMeans: KMeans = new KMeans()
      .setMaxIter(20)
      .setK(3) // iris has 3 species, so 3 clusters
      .setFeaturesCol("features")

    /** Pipeline: label indexing followed by K-means. */
    val pipeline: Pipeline = new Pipeline().setStages(Array(indexer, kMeans))

    /** Grid search over the centroid initialization mode. */
    val paramMaps: Array[ParamMap] = new ParamGridBuilder()
      .addGrid(kMeans.initMode, Array("random", "k-means||"))
      .build()

    val validator: CrossValidator = new CrossValidator()
      .setEstimator(pipeline)
      .setEvaluator(new ClusteringEvaluator)
      .setEstimatorParamMaps(paramMaps)
      /** Use 3 or more folds in production. */
      .setNumFolds(2)
      .setParallelism(2)

    val model: CrossValidatorModel = validator.fit(train)

    val res: DataFrame = model.transform(test)

    res.show(30, false)

    /** Evaluation report.
      * The silhouette coefficient measures how similar each point is to its
      * own cluster compared to the other clusters. It ranges from -1 to +1:
      * a higher value means the point matches its own cluster well and the
      * other clusters poorly. High values indicate a good clustering; small
      * or negative values suggest too many or too few clusters.
      */
    val evaluator: ClusteringEvaluator = new ClusteringEvaluator()
      .setFeaturesCol("features")
      .setPredictionCol("prediction")
      .setMetricName("silhouette")

    val silhouette: Double = evaluator.evaluate(res)
    println(s"轮廓系数值=$silhouette")

    // NOTE(review): cluster ids assigned by K-means are arbitrary, so
    // "label = prediction" only measures agreement when the indexer and the
    // clustering happen to use the same numbering. Treat this rate as a
    // rough indicator, not a true accuracy metric.
    val accuracy: DataFrame = res.selectExpr("SUM(case when label=prediction then 1 else 0 end)/count(1) as accuracy")
    val rate: Double = accuracy.first().getAs[Double]("accuracy")
    println(s"预测成功率为：$rate%")

    session.stop()
  }
}
