package com.yang.spark.mllib.knn

import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable.ArrayBuffer

/**
  * Created by yang on 2018/7/4.
  * k-nearest-neighbours (kNN) classifier implemented on Spark.
  * In production the data to classify is typically much larger than the
  * training set, so the training set is shipped to executors as a
  * broadcast variable.
  */
object knn {

  /**
    * Entry point: splits the input file into train/test partitions, classifies
    * every test row by majority vote among its k nearest training rows
    * (Euclidean distance over the feature columns), and writes the rows back
    * out with a `,predicted=<label>` suffix.
    *
    * Optional arguments (defaults preserve the original behaviour):
    *   args(0) — input path   (default "/train_data")
    *   args(1) — output path  (default "/test_result")
    *   args(2) — k            (default 5)
    */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setAppName("knn")
    val sc = new SparkContext(conf)

    val inputPath  = args.lift(0).getOrElse("/train_data")
    val outputPath = args.lift(1).getOrElse("/test_result")
    // k must stay below the square root of the training-set size or the
    // take(k) neighbourhood is meaningless; in practice k < 20.
    val k = args.lift(2).map(_.toInt).getOrElse(5)

    // Randomly split the labelled data: ~20% test, ~80% train.
    val data = sc.textFile(inputPath).randomSplit(Array(2.0, 8.0))
    // Collect the (smaller) training set and broadcast it to all executors.
    val trainSet = sc.broadcast(data(1).collect())

    // Classify every test row.
    val test_data = data(0).mapPartitions { partition =>
      // Read the broadcast value once per partition, not once per row.
      val train = trainSet.value
      partition.map { row =>
        // Distance from this test row to every training row; each element is
        // the training row's columns with the distance appended as the last.
        val scored = train.map(line => eval(line, row))

        // Keep the k nearest neighbours (smallest Euclidean distance, which
        // eval appends as the final column).
        val nearest = scored.sortWith(_.last.toDouble < _.last.toDouble).take(k)

        // Count label occurrences among the neighbours; the label is the
        // column just before the appended distance.
        val votes = nearest
          .map(cols => (cols(cols.length - 2), 1))
          .groupBy(_._1)
          .map { case (label, group) => (label, group.size) }

        // Majority vote decides the prediction. (Ties were already resolved
        // arbitrarily before, since groupBy on a Map has no defined order.)
        val predicted = votes.maxBy(_._2)._1

        row + ",predicted=" + predicted
      }
    }

    // Write the predictions as a single output file.
    test_data.repartition(1).saveAsTextFile(outputPath)

    sc.stop()
  }

  /**
    * Euclidean distance between a training row and a test row.
    *
    * Both rows are comma-separated; every column except the last (assumed to
    * be the class label) is treated as a numeric feature, so any feature
    * count works — not just the original hard-coded four.
    *
    * @param train comma-separated training row, features then label
    * @param test  comma-separated test row with the same layout
    * @return the training row's columns with the distance appended as the
    *         final element
    */
  def eval(train: String, test: String): Array[String] = {
    val tr = train.split(",")
    val te = test.split(",")
    val squaredSum = (0 until tr.length - 1)
      .map(i => math.pow(tr(i).toDouble - te(i).toDouble, 2))
      .sum
    tr :+ math.sqrt(squaredSum).toString
  }

}
