package com.shujia.mllib

import org.apache.spark.ml.clustering.{KMeans, KMeansModel}
import org.apache.spark.ml.linalg
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * K-means clustering demo on a 2-D point set using Spark ML.
 *
 * Reads (x, y) points from a CSV-formatted text file, assembles them into
 * feature vectors, fits a 2-cluster KMeans model, and prints each point
 * with its predicted cluster.
 */
object Demo7KMeans {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local[*]")
      // Fixed: appName was a copy-paste leftover ("Demo4ReadImage").
      .appName("Demo7KMeans")
      .config("spark.sql.shuffle.partitions", 4)
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    try {
      /**
       * 1. Feature engineering: load raw (x, y) pairs.
       */
      val kmeansDF: DataFrame = spark
        .read
        .format("csv")
        .option("sep", ",")
        .schema("x Double,y Double")
        .load("spark/data/kmeans.txt")

      // Spark ML estimators require a single vector column named "features".
      val kmeansData: DataFrame = kmeansDF
        .as[(Double, Double)]
        .map {
          case (x: Double, y: Double) => {
            val denseVec: linalg.Vector = Vectors.dense(Array(x, y))
            Tuple1(denseVec)
          }
        }.toDF("features")

      /**
       * 2. Choose the model: KMeans with k = 2 clusters.
       */
      val km: KMeans = new KMeans()
        .setK(2)

      /**
       * 3. Fit the model and attach a "prediction" (cluster id) column.
       */
      val model: KMeansModel = km.fit(kmeansData)
      val resDF: DataFrame = model.transform(kmeansData)

      resDF.show(1000, false)
    } finally {
      // Fixed: the session was never stopped, leaking resources on exit.
      spark.stop()
    }
  }

}
