package com.shujia.ml

import org.apache.spark.ml.clustering.KMeans
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Example: train a k-means model with Spark ML on 2-D points read from a
 * comma-separated text file, then predict cluster assignments for two
 * hand-built sample points.
 *
 * Input file format (one point per line): "x,y" — two doubles.
 */
object Code07KMeans {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local[*]")
      .appName("spark")
      .getOrCreate()

    // Ensure the session is always stopped, even if training fails,
    // so local executor threads and the UI port are released.
    try {
      val kmeansRDD: RDD[String] = spark.sparkContext.textFile("spark_code/data/ml/kmeans.txt")

      import spark.implicits._

      // Parse each "x,y" line into a dense feature vector; the column must be
      // named "features" because that is KMeans' default features column.
      // NOTE(review): assumes every line has at least two numeric fields —
      // malformed lines will throw at job execution time.
      val dataFrame: DataFrame = kmeansRDD.map {
        line =>
          val splitRes: Array[String] = line.split(",")
          Tuple1(Vectors.dense(splitRes(0).toDouble, splitRes(1).toDouble))
      }.toDF("features")

      // Train a k-means model with 2 clusters; fixed seed for reproducibility.
      val kmeans = new KMeans()
        .setK(2) // number of clusters
        .setSeed(1L)
      val model = kmeans.fit(dataFrame)

      // Build a tiny DataFrame of sample points to classify.
      val tfDF: DataFrame = spark.sparkContext.parallelize(
        List((1.0, 2.0), (6.3, 8.3))
      ).map {
        case (x, y) => Tuple1(Vectors.dense(x, y))
      }.toDF("features")

      // Make predictions: adds a "prediction" column with the cluster index.
      val predictions = model.transform(tfDF)
      predictions.show()
    } finally {
      spark.stop()
    }
  }
}
