package com.shujia.mllib

import org.apache.spark.ml.clustering.{KMeans, KMeansModel}
import org.apache.spark.ml.linalg
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}

/**
 * Demo: K-means clustering with Spark ML on a 2-column CSV of (x, y) points.
 *
 * Reads `spark/data/kmeans.txt`, converts each row to an ML `Vector`,
 * fits a 3-cluster KMeans model, and prints every point with its
 * assigned cluster id (`prediction` column).
 */
object Demo4Kmeans {
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession.builder()
      .appName("kmeans")
      .master("local")
      .config("spark.sql.shuffle.partitions", "2")
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    // 1. Read the data: two DOUBLE columns per row.
    val data: DataFrame = spark
      .read
      .format("csv")
      .schema("x DOUBLE , y DOUBLE")
      .load("spark/data/kmeans.txt")

    val ds: Dataset[(Double, Double)] = data.as[(Double, Double)]

    // Convert each row into an ML dense Vector.
    // NOTE: KMeans requires the features column to be VectorUDT; a plain
    // Array[Double] column (ArrayType) makes `fit` throw an
    // IllegalArgumentException. Wrapping in Tuple1 lets the product
    // encoder handle the Vector (UDT) field.
    val vectorDF: DataFrame = ds
      .map(kv => Tuple1(Vectors.dense(kv._1, kv._2)))
      .toDF("features")

    // 2. Build the KMeans estimator.
    val kMeans = new KMeans()

    // Number of clusters to find.
    kMeans.setK(3)
    // Fixed seed so repeated runs of the demo give the same assignments.
    kMeans.setSeed(42L)

    // Iteratively fit the model (cluster centers).
    val model: KMeansModel = kMeans.fit(vectorDF)

    // Assign each point to its nearest cluster (adds a `prediction` column).
    val result: DataFrame = model.transform(vectorDF)

    result.show(100000)

    // Release Spark resources.
    spark.stop()
  }

}
