package com.shujia.spark.mllib

import org.apache.spark.mllib.clustering.KMeans
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.{SparkConf, SparkContext}

object Demo2kmeans {

  /**
   * K-means clustering demo using Spark MLlib's RDD-based API.
   *
   * Reads comma-separated numeric rows from a local text file, converts each
   * row into a dense vector, trains a 2-cluster K-means model, prints the
   * learned cluster centers, and prints each point with its assigned cluster.
   */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setMaster("local").setAppName("app")

    val sc = new SparkContext(conf)

    val rdd1 = sc.textFile("spark/data/kmean.txt")

    // Convert each line ("x1,x2,...") into a dense vector.
    // Cache it: K-means is iterative (up to 100 passes here) and the RDD is
    // also reused below for prediction — without cache() every pass would
    // re-read and re-parse the file (MLlib warns about uncached K-means input).
    val rdd2 = rdd1.map(line => {
      val data = line.split(",").map(_.toDouble)
      Vectors.dense(data)
    }).cache()

    rdd2.foreach(println)

    // Train the model: k = 2 clusters, at most 100 iterations.
    val model = KMeans.train(rdd2, 2, 100)

    // Print the center of each learned cluster.
    model.clusterCenters.foreach(println)

    // Assign every input vector to its nearest cluster: (vector, clusterId).
    val rdd3 = rdd2.map(vector => {
      (vector, model.predict(vector))
    })

    rdd3.foreach(println)

    // Shut down the SparkContext cleanly.
    sc.stop()
  }

}
