package com.shujia.mllib

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.clustering.KMeansModel
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.rdd.RDD
import org.apache.spark.mllib.linalg.{Matrices, Matrix, Vector, Vectors}
import org.apache.spark.sql.SQLContext

object Demo3Kmeans {

  /**
    * KMeans clustering demo.
    *
    * Reads comma-separated feature vectors from "data/kmeans.txt", trains a
    * 2-cluster KMeans model with the DataFrame-based (spark.ml) API, then
    * prints the cluster centers and the per-row cluster assignments.
    * The older RDD-based (spark.mllib) variant is kept below, commented out,
    * for reference.
    */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setMaster("local").setAppName("kmeans")
    val sc = new SparkContext(conf)
    try {
      val data = sc.textFile("data/kmeans.txt")
      // NOTE(review): SQLContext is deprecated since Spark 2.0 — prefer
      // SparkSession if the project's Spark version allows it.
      val sQLContext = new SQLContext(sc)
      import sQLContext.implicits._

      // Build the training set: each line "x1,x2,..." becomes one dense vector.
      val trainRDD: RDD[Vector] = data.map(line => {
        val arr = line.split(",").map(_.toDouble)
        Vectors.dense(arr)
      })

      /**
        * 1. RDD-based API (spark.mllib), kept for reference.
        *
        * train — trains the model:
        *   data           dataset, one vector per row
        *   k              number of clusters
        *   maxIterations  maximum number of iterations
        */
      /* val model: KMeansModel = org.apache.spark.mllib.clustering.KMeans.train(data = trainRDD, k = 2, maxIterations = 10)

      // cluster centers
      model.clusterCenters.foreach(println)

      // assign each input point to a cluster
      model.predict(trainRDD).foreach(println) */

      /**
        * 2. DataFrame-based API (spark.ml) — the currently recommended API.
        */

      // The constant column "l" exists only because toDF needs a tuple;
      // the actual features live in the "point" column.
      val trainDF = trainRDD.map(v => (1, v)).toDF("l", "point")

      trainDF.show()

      // Build the estimator: 2 clusters, features read from "point",
      // assignments written to "prediction".
      val kmeans = new org.apache.spark.ml.clustering.KMeans()
        .setK(2)
        .setFeaturesCol("point")
        .setPredictionCol("prediction")

      val model1 = kmeans.fit(trainDF)

      // cluster centers
      model1.clusterCenters.foreach(println)

      // classify the data: transform() appends the "prediction" column
      val resultDF = model1.transform(trainDF)

      resultDF.show()
    } finally {
      // Fix: the original never stopped the SparkContext, leaking the local
      // cluster's resources; stop it even when the job throws.
      sc.stop()
    }
  }
}
