package com.shujia.mllib

import org.apache.spark.ml.clustering.{KMeans, KMeansModel}
import org.apache.spark.ml.linalg
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

object Demo06KMeans {

  /**
   * Unsupervised learning demo: cluster 2-D points with KMeans (k = 2),
   * then write each point with its assigned cluster id back out as CSV.
   */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .appName("Demo06KMeans")
      .master("local[*]")
      .config("spark.sql.shuffle.partitions", 8)
      .getOrCreate()

    try {
      // Read the raw (x, y) points; the schema forces both columns to Double.
      val sourceDF: DataFrame = spark
        .read
        .format("csv")
        .option("sep", ",")
        .schema("x Double,y Double")
        .load("Spark/data/mllib/data/kmeans.txt")

      import spark.implicits._

      // KMeans expects a single Vector-typed "features" column,
      // so pack each (x, y) pair into a dense vector.
      val dataDF: DataFrame = sourceDF
        .as[(Double, Double)]
        .map {
          case (x: Double, y: Double) =>
            val denseVec: linalg.Vector = Vectors.dense(Array(x, y))
            Tuple1(denseVec)
        }.toDF("features")

      val kmeans: KMeans = new KMeans()
        .setK(2) // cluster the points into 2 groups

      // Fit the model, then append a "prediction" column (the cluster id).
      val kmeansModel: KMeansModel = kmeans.fit(dataDF)
      val resDF: DataFrame = kmeansModel
        .transform(dataDF)

      resDF.printSchema()

      // Unpack the feature vector back into plain (x, y, cluster) columns
      // so the result can be saved in CSV format (CSV cannot hold Vectors).
      resDF
        .as[(linalg.Vector, Int)]
        .map {
          case (features: linalg.Vector, prediction: Int) =>
            // Vector.apply reads a component directly — avoids building
            // a fresh Array per access as features.toArray did.
            (features(0), features(1), prediction)
        }
        .write
        .format("csv")
        .mode(SaveMode.Overwrite)
        .save("Spark/data/mllib/kmeans")
    } finally {
      // Release the SparkSession and its resources even if the job fails.
      spark.stop()
    }
  }

}
