package cn.itcast.tags.ml.clustering

import org.apache.spark.ml.clustering.{KMeans, KMeansModel}
import org.apache.spark.sql.{DataFrame, SparkSession}

object IrisClusterTest {

  /**
   * Demo: cluster the iris dataset (libsvm format, 4 features) with KMeans,
   * then inspect cluster centers, the WSSSE error metric, and a cross-tab of
   * true label vs. assigned cluster.
   */
  def main(args: Array[String]): Unit = {

    // Local SparkSession; shuffle partitions kept small for a toy dataset.
    val spark: SparkSession = SparkSession.builder()
      .appName(this.getClass.getSimpleName.stripSuffix("$"))
      .master("local[4]")
      .config("spark.sql.shuffle.partitions", 4)
      .getOrCreate()

    import spark.implicits._

    // 1. Load iris data stored in libsvm format: one label + 4 features per row.
    val irisDF: DataFrame = spark.read
      .format("libsvm")
      .option("numFeatures", 4)
      .load("datas/iris/iris_kmeans.txt")

    irisDF.show(100, truncate = false)

    // 2. Configure KMeans: 3 clusters (iris has 3 species), k-means|| init.
    val kmeans = new KMeans()
      .setFeaturesCol("features")
      .setPredictionCol("prediction")
      .setK(3)
      .setMaxIter(20)
      .setInitMode("k-means||") // init mode: "random" or "k-means||" (the default)

    // 3. Fit the model on the full dataset.
    val kMeansModel: KMeansModel = kmeans.fit(irisDF)

    // Cluster centers; WSSSE for this data is expected around 78.945.
    kMeansModel.clusterCenters.foreach(println)

    // 4. Model evaluation: within-set sum of squared errors.
    // NOTE(review): computeCost is deprecated since Spark 2.4 — prefer
    // org.apache.spark.ml.evaluation.ClusteringEvaluator when upgrading.
    val wssse: Double = kMeansModel.computeCost(irisDF)
    // Fixed: println("误差平方和：", wssse) auto-tupled its arguments and
    // printed "(误差平方和：,78.9...)"; interpolate into one string instead.
    println(s"误差平方和：$wssse")

    // 5. Predict on the training data and cross-tabulate label vs. cluster.
    val predictionDF: DataFrame = kMeansModel.transform(irisDF)
    predictionDF.show(150, truncate = false)
    predictionDF
      .groupBy($"label", $"prediction")
      .count()
      .show(150, truncate = false)

    spark.stop()
  }
}
