package com.shujia.mllib

import org.apache.spark.ml.clustering.{KMeans, KMeansModel}
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo07KMeans {
  /**
   * Demo entry point: loads 2-D points (x, y) from a CSV file, fits a
   * K-Means model with k = 2 on the full data set, and prints each point
   * with its assigned cluster ("prediction" column).
   */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .appName("Demo07KMeans")
      .master("local[*]")
      .config("spark.sql.shuffle.partitions", "8")
      .getOrCreate()

    // Ensure the session (and its executors/UI) is released even if the
    // pipeline below throws — the original demo leaked the SparkSession.
    try {
      import spark.implicits._

      // 1. Load the raw data and run feature engineering.
      val kMeansDF: DataFrame = spark
        .read
        .format("csv")
        .option("sep", ",")
        .schema("x Double,y Double")
        .load("Spark/data/mllib/data/kmeans.txt")

      // With an explicit schema, unparsable fields come back as null and
      // would throw an NPE when unboxed to primitive Double below, so drop
      // malformed rows first. Then pack (x, y) into the single "features"
      // vector column that Spark ML estimators expect.
      val preDF: DataFrame = kMeansDF
        .na.drop()
        .as[(Double, Double)]
        .map { case (x, y) => Tuple1(Vectors.dense(x, y)) }
        .toDF("features")

      // K-Means is unsupervised, so no train/test split is needed.

      // 2. Configure the model.
      val kMeans: KMeans = new KMeans()
        .setK(2)     // number of clusters to partition the data into
        .setSeed(1L) // explicit seed so centroid initialisation is reproducible

      // 3. Train the model on all of the data.
      val kMeansModel: KMeansModel = kMeans.fit(preDF)

      // 4. Assign every point to a cluster.
      val preResDF: DataFrame = kMeansModel.transform(preDF)

      preResDF.show(truncate = false)
    } finally {
      spark.stop()
    }
  }

}
