package homework4

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

object KNNDemo {

  /**
   * K-nearest-neighbours demo on the Iris dataset.
   *
   * Reads labeled training samples from `data/Iris.csv` and unlabeled test
   * points from `data/Iris2.csv` (both assumed to have a header row, an id in
   * column 0, four numeric features in columns 1-4, and — for the training
   * file — the species label in column 5). For each test point it finds the
   * K nearest training samples by squared Euclidean distance and predicts
   * the majority label, printing each prediction.
   */
  def main(args: Array[String]): Unit = {

    val spark = SparkSession
      .builder()
      .appName("KNNDemo")
      .master("local[*]")
      .getOrCreate()

    import spark.implicits._

    // Training samples as (label, (f1, f2, f3, f4)).
    val df1: DataFrame = spark.read
      .options(Map(("header", "true"), ("inferschema", "true")))
      .csv("data/Iris.csv")
    val firstRDD: RDD[(String, (Double, Double, Double, Double))] = df1.rdd.map(item => (
      item.getAs[String](5), (item.getAs[Double](1), item.getAs[Double](2), item.getAs[Double](3), item.getAs[Double](4))
    ))

    // Number of neighbours to vote.
    val K = 10

    // Points to classify; collected to the driver so we can loop over them.
    val df2: DataFrame = spark.read
      .options(Map(("header", "true"), ("inferschema", "true")))
      .csv("data/Iris2.csv")
    val data = df2.rdd.map(item =>
      (item.getAs[Double](1), item.getAs[Double](2), item.getAs[Double](3), item.getAs[Double](4))
    ).collect()

    val result: Array[((Double, Double, Double, Double), String)] = data.map { v =>
      // Squared Euclidean distance over ALL four features.
      // (The original summed the first feature's difference four times,
      // effectively classifying on a single feature.)
      val nearest: Array[(Double, String)] = firstRDD.map { case (label, s) =>
        val dist = Math.pow(s._1 - v._1, 2) + Math.pow(s._2 - v._2, 2) +
          Math.pow(s._3 - v._3, 2) + Math.pow(s._4 - v._4, 2)
        (dist, label)
      }.sortByKey().take(K)

      // Majority vote among the K nearest neighbours. `nearest` is a tiny
      // local array, so count on the driver instead of building an RDD
      // (the original round-tripped it through sc.makeRDD + countByKey).
      val predicted =
        if (nearest.isEmpty) "" // no training data: keep the original's empty-label fallback
        else nearest
          .groupBy(_._2)
          .map { case (label, hits) => (label, hits.length) }
          .maxBy(_._2)
          ._1

      println(s"$v, $predicted")
      (v, predicted)
    }

    spark.stop() // release local Spark resources before exiting
  }

}
