package com.shujia.mlib

import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext

/**
  * Demo of K-means clustering with Spark, shown twice:
  * first with the legacy RDD-based `mllib` API, then with the
  * DataFrame-based `ml` API. Reads comma-separated numeric rows
  * from `spark/data/kmeans.txt`, fits k=3 clusters (10 iterations),
  * and prints the cluster assignment of every point.
  */
object Demo4Kmeans {

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local").setAppName("Demo4Kmeans")

    val sc = new SparkContext(conf)
    // NOTE: SQLContext is deprecated in modern Spark; kept here because the
    // file's imports target the SQLContext API. Only its implicits (toDF) are used.
    val sqlContext = new SQLContext(sc)
    import sqlContext.implicits._

    try {
      val lines = sc.textFile("spark/data/kmeans.txt")

      // Parse each "x,y,..." line into a dense feature vector.
      val points = lines.map(line => {
        val arr = line.split(",").map(_.toDouble)
        Vectors.dense(arr)
      })

      points.foreach(println)

      /**
        * RDD (mllib) version
        */

      // Train the model: k = 3 clusters, 10 iterations.
      // lowerCamelCase name avoids shadowing the mllib KMeansModel class.
      val kmeansModel = org.apache.spark.mllib.clustering.KMeans.train(points, 3, 10)

      // Print all cluster centers.
      kmeansModel.clusterCenters.foreach(println)

      // Assign each point to its nearest cluster and print "point<TAB>label".
      points.foreach(point => {
        val label = kmeansModel.predict(point)
        println(s"$point\t$label")
      })

      /**
        * DataFrame (ml) version
        */
      // The ml API needs a DataFrame; "id" is a dummy column, "point" holds features.
      val pointDF = points.map(point => (1, point)).toDF("id", "point")

      // Build the estimator: same k and iteration budget as the RDD version.
      val kmeans = new org.apache.spark.ml.clustering.KMeans()
        .setK(3)
        .setFeaturesCol("point")      // input column
        .setMaxIter(10)
        .setPredictionCol("prediction") // output column

      // Fit the model.
      val model = kmeans.fit(pointDF)

      // Predict: appends the "prediction" column to the input DataFrame.
      val predictionDF = model.transform(pointDF)

      predictionDF.show(14)
    } finally {
      // Always release the SparkContext, even if a stage fails.
      sc.stop()
    }
  }

}
