package KNN

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

object KNN {

  /** Entry point: classifies each point in data/IrisB.csv (test set) by
    * K-nearest-neighbours against the labelled points in data/IrisA.csv
    * (training set), printing (testPointId, predictedLabel) pairs.
    *
    * Expected CSV schema (both files): col 0 = Int id, cols 1-4 = Double
    * features, col 5 (training file only is read) = String label.
    */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession.builder().appName("KNN").master("local[*]").getOrCreate()
    spark.sparkContext.setLogLevel("warn")
    // Number of neighbours that vote on the predicted label.
    val K = 9

    // 1. Read files A and B into datasets X (labelled training data) and Y (points to classify).
    val X: DataFrame = spark.read
      .option("header", "true")
      .option("delimiter", ",")
      .option("inferschema", "true")
      .csv("data/IrisA.csv")

    val Y: DataFrame = spark.read
      .option("header", "true")
      .option("delimiter", ",")
      .option("inferschema", "true")
      .csv("data/IrisB.csv")

    // Pair every training point with every test point.
    val XtoY: RDD[(Row, Row)] = X.rdd.cartesian(Y.rdd)

    // 2./3. Euclidean distance from each test point to each training point,
    //       keyed by the test point's id and tagged with the training label.
    val D: RDD[(Int, (String, Double))] = XtoY.map {
      case (x, y) =>
        val xtype = x.getString(5) // training label
        val yid = y.getInt(0)      // test-point id
        // Sum of squared differences over the four feature columns (1..4).
        val distance = Math.sqrt(
          (1 to 4).map(i => Math.pow(x.getDouble(i) - y.getDouble(i), 2)).sum
        )
        (yid, (xtype, distance))
    }

    // 4. Keep only the K closest training points per test point. Each partition
    //    maintains a bounded top-K list (O(1) prepend instead of O(n) append;
    //    the sortBy makes insertion order irrelevant), then lists are merged.
    val results: RDD[(Int, List[(String, Double)])] = D.aggregateByKey(List[(String, Double)]())(
      (topK, labelled) => (labelled :: topK).sortBy(_._2).take(K),
      (topK1, topK2) => (topK1 ++ topK2).sortBy(_._2).take(K)
    ).sortByKey()

    // 5./6. Majority vote over the K neighbours' labels.
    //       NOTE: the original code used only `head._1` (the single nearest
    //       neighbour), which is 1-NN — it ignored the other K-1 neighbours.
    //       collect() brings the small result to the driver so println output
    //       appears there rather than on executors.
    results.map {
      case (yid, neighbours) =>
        val predicted = neighbours
          .groupBy(_._1)                        // label -> occurrences among the K neighbours
          .maxBy { case (_, occ) => occ.size }  // most frequent label wins (ties resolved arbitrarily)
          ._1
        (yid, predicted)
    }.collect().foreach(println)

    spark.stop()
  }
}
