package com.shujia.spark.mllib

import org.apache.spark.ml.clustering.{KMeans, KMeansModel}
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Demo: clustering 2-D points with Spark ML KMeans.
 *
 * Reads comma-separated "x,y" pairs from a text file, assembles them into a
 * single vector column named "features" (the default input column expected by
 * [[org.apache.spark.ml.clustering.KMeans]]), fits a 2-cluster model, and
 * prints the cluster assignment for each row.
 */
object Demo07KMeans {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .appName(this.getClass.getSimpleName.replace("$", ""))
      .master("local[*]") // run in local mode, using all available cores
      .config("spark.sql.shuffle.partitions", "16")
      .getOrCreate()

    // Needed for the .toDF(...) conversion on the RDD below.
    import spark.implicits._

    try {
      // Parse each "x,y" line into a dense 2-D feature vector.
      // .trim guards against stray whitespace around the numbers.
      val df: DataFrame = spark
        .sparkContext
        .textFile("spark/data/mllib/data/kmeans.txt")
        .map(line => {
          val splits: Array[String] = line.split(",")
          val x: Double = splits(0).trim.toDouble
          val y: Double = splits(1).trim.toDouble
          Tuple1(Vectors.dense(x, y))
        }).toDF("features")

      val kmeans: KMeans = new KMeans()
        .setK(2) // number of clusters
        .setSeed(1L) // fixed seed for reproducible centroid initialization

      // Fit the model on the full dataset.
      val model: KMeansModel = kmeans.fit(df)

      // Make predictions: adds a "prediction" column with the cluster index.
      val predictions: DataFrame = model.transform(df)

      predictions.show(50, truncate = false)
    } finally {
      // Always release the session (and underlying SparkContext),
      // even if reading/fitting fails.
      spark.stop()
    }
  }

}
