package com.shujia.mllib

import org.apache.spark.ml.clustering.{KMeans, KMeansModel}
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Demo: K-Means clustering with Spark ML.
 *
 * Reads comma-separated numeric rows from `spark/data/mllib/data/kmeans.txt`,
 * assembles each row into a dense feature vector, fits a 2-cluster KMeans
 * model, and prints the per-row cluster assignments.
 */
object Demo07KMeans {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local[*]")
      .appName("Demo07KMeans")
      .getOrCreate()

    import spark.implicits._

    // Each input line is "d1,d2,...,dn"; wrap the parsed values in a dense
    // vector under the default "features" column expected by KMeans.
    val df: DataFrame = spark
      .sparkContext
      .textFile("spark/data/mllib/data/kmeans.txt")
      .map(line => {
        val arr: Array[Double] = line.split(",").map(s => s.toDouble)
        Tuple1(Vectors.dense(arr))
      }).toDF("features")

    val kMeans: KMeans = new KMeans()
      .setK(2)      // number of clusters to partition the data into
      .setSeed(1L)  // fix the seed so repeated runs of the demo are reproducible
    val kMeansModel: KMeansModel = kMeans.fit(df)

    // transform() appends a "prediction" column with the assigned cluster id.
    val predictDF: DataFrame = kMeansModel.transform(df)
    predictDF.show()

    // Release the driver/session resources; the original leaked the session.
    spark.stop()
  }

}
