package com.shujia.mllib

import org.apache.spark.ml.clustering.{KMeans, KMeansModel}
import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.sql.{DataFrame, Dataset, SaveMode, SparkSession}

object Demo7Kmeans {

  def main(args: Array[String]): Unit = {

    /**
      * Clustering with k-means — an unsupervised machine-learning algorithm.
      *
      * Reads 2-D points from a CSV file, assembles them into a feature
      * vector column, fits a 2-cluster k-means model, and prints each
      * point's cluster assignment.
      */

    val spark: SparkSession = SparkSession.builder()
      .master("local[8]")
      .appName("person")
      .config("spark.sql.shuffle.partitions", 2)
      .getOrCreate()

    // Read the points as two DOUBLE columns (x, y).
    val pointDF: DataFrame = spark.read.format("csv")
      .schema("x DOUBLE, y DOUBLE")
      .load("spark/data/kmeans.txt")

    // BUG FIX: spark.ml's KMeans requires the "features" column to be of
    // SQL type vector (VectorUDT), not array<double>. Mapping each row to
    // Array(x, y) produced array<double> and made fit() throw
    // IllegalArgumentException at runtime. VectorAssembler builds the
    // proper vector column from the raw numeric columns.
    val assembler: VectorAssembler = new VectorAssembler()
      .setInputCols(Array("x", "y"))
      .setOutputCol("features")
    val train: DataFrame = assembler.transform(pointDF)

    // Build the estimator: 2 clusters; fixed seed so repeated runs of this
    // demo produce the same cluster assignments.
    val means: KMeans = new KMeans()
      .setK(2)
      .setSeed(1L)

    // Train the model.
    val model: KMeansModel = means.fit(train)

    // Cluster assignment: transform() appends a "prediction" column with
    // each point's cluster index.
    val frame: DataFrame = model.transform(train)

    frame.show(10000)

    // Release local Spark resources before the JVM exits.
    spark.stop()
  }

}
