package cn.itcast.tags.ml.clustering

import org.apache.spark.ml.clustering.{KMeans, KMeansModel}
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Demo: cluster the Iris dataset with the KMeans algorithm and
 * evaluate the fit via the within-set sum of squared errors (WSSSE).
 */
object IrisClusterDemo {
  def main(args: Array[String]): Unit = {
    // Local SparkSession; shuffle partitions kept small for a local demo.
    val spark = SparkSession.builder()
      .appName(this.getClass.getSimpleName.stripSuffix("$"))
      .master("local[2]")
      .config("spark.sql.shuffle.partitions", "2")
      .getOrCreate()
    import spark.implicits._

    // 1. Load the Iris dataset in LIBSVM format (4 features per record).
    val irisDF: DataFrame = spark.read
      .format("libsvm")
      .option("numFeatures", 4)
      .load("datas/iris_kmeans.txt")

    /*
      root
       |-- label: double (nullable = true)
       |-- features: vector (nullable = true)
     */
    irisDF.printSchema()
    /*
      +-----+-------------------------------+
      |label|features                       |  dense vector: (5.1, 3.5, 1.4, 0.2)
      +-----+-------------------------------+
      |1.0  |(4,[0,1,2,3],[5.1,3.5,1.4,0.2])|  -> sparse vector representation
      |1.0  |(4,[0,1,2,3],[4.9,3.0,1.4,0.2])|
     */
    irisDF.show(10, truncate = false)

    // 2. Build the KMeans estimator.
    val kMeans: KMeans = new KMeans()
      // input (features) and output (prediction) column names
      .setFeaturesCol("features")
      .setPredictionCol("prediction")
      // the Iris dataset has 3 species, so K = 3
      .setK(3)
      // maximum number of iterations
      .setMaxIter(20)
      // initialization mode: "random" or "k-means||" (scalable k-means++)
      .setInitMode("k-means||")
      // fixed seed so the demo output is reproducible across runs
      .setSeed(31L)

    // 3. Fit the model on the dataset.
    val kMeansModel: KMeansModel = kMeans.fit(irisDF)
    // Print the learned cluster centers.
    kMeansModel.clusterCenters.foreach(println)

    // 4. Model evaluation: within-set sum of squared errors (WSSSE).
    //    KMeansModel.computeCost is deprecated since Spark 2.4 (removed in 3.0);
    //    the training summary exposes the same training cost directly.
    val wssse: Double = kMeansModel.summary.trainingCost
    // Within Set Sum of Squared Errors : 78.94506582597637
    println(s"WSSSE = ${wssse}")

    // 5. Predict cluster assignments and cross-tabulate against the true labels.
    val predictionDF: DataFrame = kMeansModel.transform(irisDF)
    predictionDF.show(150, truncate = false)
    predictionDF
      .groupBy($"label", $"prediction")
      .count()
      .show(5, truncate = false)

    // Done: release Spark resources.
    spark.stop()
  }
}
