package com.shujia.mllib

import org.apache.spark.mllib.linalg
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.clustering.{KMeans, KMeansModel}

/**
  * K-means clustering demo on Spark MLlib (RDD-based API).
  *
  * Reads comma-separated numeric vectors from `spark/data/kmeans.txt`,
  * trains a 2-cluster K-means model, prints the learned cluster centers,
  * then prints each input point together with its predicted cluster index.
  */
object Kmeans {

  def main(args: Array[String]): Unit = {

    val conf: SparkConf = new SparkConf().setMaster("local").setAppName("test")
    val sc: SparkContext = new SparkContext(conf)

    // Ensure the SparkContext is always released, even if training fails.
    try {
      val data: RDD[String] = sc.textFile("spark/data/kmeans.txt")

      // Parse each line into a dense vector.
      // Blank lines (e.g. a trailing newline) are skipped so that
      // `_.toDouble` does not throw NumberFormatException on empty input.
      val vectors: RDD[linalg.Vector] = data
        .map(_.trim)
        .filter(_.nonEmpty)
        .map(line => {
          val array: Array[Double] = line.split(",").map(_.toDouble)
          Vectors.dense(array)
        })

      // Train the model — i.e. determine the center of each cluster.
      /**
        * KMeans.train parameters:
        *   1st — the input data (RDD of vectors)
        *   2nd — the number of clusters (k)
        *   3rd — the maximum number of iterations
        */
      val model: KMeansModel = KMeans.train(vectors, 2, 10)

      // Retrieve the learned cluster centers.
      val centers: Array[linalg.Vector] = model.clusterCenters

      println(centers.toList)

      // For each point, predict which cluster it belongs to.
      // NOTE: foreach runs on executors; output goes to the driver console
      // only because the master is "local".
      vectors.foreach(point => {
        val value: Int = model.predict(point)
        println(s"$point\t$value")
      })

    } finally {
      // Release cluster resources held by the SparkContext.
      sc.stop()
    }

  }

}
