package cn.itcast.tags.ml.clustering

import org.apache.spark.ml.clustering.{KMeans, KMeansModel}
import org.apache.spark.ml.evaluation.ClusteringEvaluator
import org.apache.spark.mllib.clustering.DistanceMeasure
import org.apache.spark.sql.{DataFrame, SparkSession}
import scala.collection.immutable

/**
 * 针对鸢尾花数据集进行聚类，使用KMeans算法，采用肘部法则Elbow获取K的值，使用轮廓系数评估模型
 */
/**
 * KMeans clustering of the iris dataset. Trains one model per candidate K
 * (2 to 6, the "elbow" sweep) and scores each with the silhouette
 * coefficient so the best K can be read off the printed results.
 */
object IrisElbowClustering {

  def main(args: Array[String]): Unit = {
    // Build the SparkSession: local mode with 4 threads, few shuffle
    // partitions since the dataset is tiny (150 rows).
    val spark: SparkSession = SparkSession.builder()
      .appName(this.getClass.getSimpleName.stripSuffix("$"))
      .master("local[4]")
      .config("spark.sql.shuffle.partitions", "4")
      .getOrCreate()
    import spark.implicits._

    // 1. Load the iris dataset in libsvm format. Resulting schema:
    //    root
    //    |-- label: double (nullable = true)
    //    |-- features: vector (nullable = true)
    val irisDF: DataFrame = spark.read
      .format("libsvm")
      .load("datas/iris_kmeans.txt")
    // irisDF.printSchema()
    // irisDF.show(10, truncate = false)

    // 2. Sweep K from 2 to 6. Relevant KMeans defaults:
    //    maxIter=20, tol=1e-4, initMode=k-means||, initSteps=2,
    //    distanceMeasure=euclidean.
    val clusters: immutable.Seq[(Int, String, Double)] = (2 to 6).map { k =>
      // a. Build the KMeans estimator for this K.
      val kMeans: KMeans = new KMeans()
        .setFeaturesCol("features")
        .setPredictionCol("prediction")
        .setK(k)
        .setMaxIter(20)
        // Distance measure: euclidean or cosine.
        //.setDistanceMeasure(DistanceMeasure.EUCLIDEAN)
        .setDistanceMeasure(DistanceMeasure.COSINE)
        // FIX: k-means|| initialization is randomized; without a fixed seed
        // the cluster assignments and silhouette vary between runs, so the
        // sample results documented below could never be reproduced.
        .setSeed(31L)

      // b. Fit the model on the full dataset.
      val kMeansModel: KMeansModel = kMeans.fit(irisDF)
      // c. Assign every row to a cluster.
      val predictionDF: DataFrame = kMeansModel.transform(irisDF)

      // Summarize cluster sizes as "clusterId -> count". FIX: the original
      // collectAsMap().mkString gave hash-map iteration order, which is not
      // deterministic; sorting by cluster id makes the output stable.
      val preResult: String = predictionDF
        .groupBy($"prediction")
        .count()
        .select($"prediction", $"count")
        .as[(Int, Long)]
        .collect()
        .sortBy { case (cluster, _) => cluster }
        .map { case (cluster, cnt) => s"$cluster -> $cnt" }
        .mkString(",")

      // d. Silhouette evaluator. The distance measure must match the one
      //    used for training ("squaredEuclidean" for euclidean KMeans).
      val evaluator: ClusteringEvaluator = new ClusteringEvaluator()
        //.setDistanceMeasure("squaredEuclidean")
        .setFeaturesCol("features")
        .setPredictionCol("prediction")
        .setDistanceMeasure("cosine")
        .setMetricName("silhouette")

      // e. Silhouette is in [-1, 1]; values near 1 mean well-separated,
      //    cohesive clusters.
      val silhouette: Double = evaluator.evaluate(predictionDF)

      // f. (K, cluster-size summary, silhouette score).
      (k, preResult, silhouette)
    }

    // Print K, cluster sizes and silhouette for each run.
    // Sample output with euclidean distance (K=3 is the best non-trivial fit):
    //   (2,1 -> 97,0 -> 53,0.8501515983265806)
    //   (3,2 -> 39,1 -> 50,0 -> 61,0.7342113066202725)
    //   (4,2 -> 28,1 -> 50,3 -> 43,0 -> 29,0.6748661728223084)
    //   (5,2 -> 30,4 -> 17,1 -> 33,3 -> 47,0 -> 23,0.5593200358940349)
    //   (6,2 -> 47,5 -> 18,4 -> 13,1 -> 19,3 -> 23,0 -> 30,0.5157126401818913)
    // Cosine distance likewise points to K=3:
    //   (2,1 -> 50,0 -> 100,0.9579554849242657)
    //   (3,2 -> 46,1 -> 50,0 -> 54,0.7484647230660575)
    //   (4,2 -> 46,1 -> 19,3 -> 31,0 -> 54,0.5754341193280768)
    //   (5,2 -> 23,4 -> 22,1 -> 50,3 -> 28,0 -> 27,0.6430770644178772)
    //   (6,2 -> 43,5 -> 21,4 -> 18,1 -> 29,3 -> 15,0 -> 24,0.4512255960897416)
    // NOTE(review): exact numbers above predate the fixed seed — regenerate
    // after rerunning.
    clusters.foreach(println)

    // Release Spark resources.
    spark.stop()
  }
}