package com.shujia.spark.mllib

import org.apache.spark.ml.clustering.{KMeans, KMeansModel}
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Demo: unsupervised clustering with Spark ML KMeans.
 *
 * Reads 2-D points from a CSV text file ("x,y" per line), assembles them into
 * a `features` vector column, fits a KMeans model with k = 2, and prints the
 * cluster assignment for each point.
 */
object Demo05KMeans {
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .master("local[*]")
      .appName("Demo05KMeans")
      .getOrCreate()

    // Ensure the session is always stopped, even if fitting fails.
    try {
      import spark.implicits._

      // 1. Load the raw data and build the feature column.
      //    Each input line is expected to be "x,y"; a malformed line
      //    (fewer than 2 fields or non-numeric values) will fail the job.
      val kmeansRDD: RDD[String] = spark.sparkContext.textFile("spark/data/mllib/data/kmeans.txt")
      val kmeansDF: DataFrame = kmeansRDD
        .map(line => {
          val splits: Array[String] = line.split(",")
          val x: Double = splits(0).toDouble
          val y: Double = splits(1).toDouble
          // Tuple1 so toDF produces a single "features" vector column.
          Tuple1(Vectors.dense(x, y))
        }).toDF("features")

      // 2. No labels exist, so there is no train/test split for clustering.
      // 3. Configure the KMeans estimator.
      val kMeans: KMeans = new KMeans()
        .setK(2)       // number of clusters to produce
        .setSeed(42L)  // fixed seed so the demo is reproducible across runs

      val model: KMeansModel = kMeans.fit(kmeansDF)

      // 4. Assign each point to its nearest cluster and show the result.
      val predictions: DataFrame = model.transform(kmeansDF)

      predictions.show()
    } finally {
      spark.stop()
    }
  }

}
