package com.yang.spark.mllib.kmeans

import org.apache.spark.mllib.clustering.KMeans
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Created by yang on 2018/7/5. 
  */
object mykmeans {

  /** Parses a CSV row into a dense feature vector built from its last two numeric fields. */
  private def parseVector(row: String) =
    Vectors.dense(row.split(",").takeRight(2).map(_.toDouble))

  /**
    * Trains a KMeans model on the input data, prints the centroids and the
    * clustering cost, then writes each input row annotated with its predicted
    * cluster.
    *
    * @param args optional overrides: args(0) = input path, args(1) = output path.
    *             Defaults preserve the original hard-coded locations.
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("kmeans")
    val sc = new SparkContext(conf)

    // Paths are overridable from the command line; defaults keep the
    // previous behavior for existing callers.
    val inputPath = if (args.length > 0) args(0) else "/train_data"
    val outputPath = if (args.length > 1) args(1) else "/test_data"

    // Load the raw training data. It is reused below for the prediction
    // pass, so cache it to avoid re-reading the input from storage.
    val data = sc.textFile(inputPath).cache()

    // Feature vectors: last two comma-separated fields of each row.
    // A plain map suffices — the original mapPartitions wrapper only
    // re-implemented map with no per-partition setup.
    val trainData = data.map(parseVector).cache()

    // Number of clusters (k)
    val numClusters = 3
    // Maximum number of iterations
    val numIterations = 30

    // Train the KMeans model.
    val model = KMeans.train(trainData, numClusters, numIterations)

    // Print the model's cluster centroids.
    model.clusterCenters.foreach(println)

    // Within-set sum of squared errors — lower means tighter clusters.
    println("K-means Cost:" + model.computeCost(trainData))

    // Annotate each original row with its predicted cluster and save.
    data
      .map(row => row + ":predict=" + model.predict(parseVector(row)))
      .saveAsTextFile(outputPath)

    // Release cached RDDs before shutting down.
    trainData.unpersist()
    data.unpersist()
    sc.stop()
  }

}
