package com.jhhe.homework4_2

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{Row, SparkSession}

/**
 * K-nearest-neighbours classification of Iris flowers with Spark.
 *
 * Reads a labelled reference set (data/IrisA.csv) and an unlabelled query
 * set (data/IrisB.csv), computes all pairwise Euclidean distances over the
 * four feature columns, and classifies each query point by majority vote
 * among its K nearest labelled neighbours.
 *
 * Expected CSV layout (both files): id:Int, four Double feature columns,
 * and (IrisA only) a species:String label in column 5.
 */
object KNN {

  /** Euclidean distance between two equal-length feature vectors. */
  private def euclideanDistance(a: Seq[Double], b: Seq[Double]): Double =
    math.sqrt(a.zip(b).map { case (p, q) => (p - q) * (p - q) }.sum)

  /**
   * Majority vote over the k nearest neighbours.
   *
   * @param neighbours (species, distance) pairs for one query point
   * @param k          number of nearest neighbours to consider
   * @return the most frequent species among the k closest neighbours
   */
  private def majoritySpecies(neighbours: Iterable[(String, Double)], k: Int): String =
    neighbours.toList
      .sortBy(_._2)       // nearest first
      .take(k)
      .groupBy(_._1)      // species -> its occurrences among the k nearest
      .maxBy(_._2.size)   // most frequent species wins
      ._1

  def main(args: Array[String]): Unit = {
    // K is overridable from the command line; defaults to the original 13.
    val k = if (args.nonEmpty) args(0).toInt else 13

    val spark = SparkSession
      .builder()
      .appName("KNN")
      .master("local[*]")
      .getOrCreate()
    spark.sparkContext.setLogLevel("warn")

    // Labelled reference set (training data).
    val X = spark.read
      .option("header", "true")
      .option("delimiter", ",")
      .option("inferSchema", "true")
      .csv("data/IrisA.csv")

    // Unlabelled query set (points to classify).
    val Y = spark.read
      .option("header", "true")
      .option("delimiter", ",")
      .option("inferSchema", "true")
      .csv("data/IrisB.csv")

    // Pair every training point with every query point, then key each pair
    // by the query-point id together with the training point's species and
    // the distance between the two feature vectors.
    val pairs: RDD[(Row, Row)] = X.rdd.cartesian(Y.rdd)
    val distances = pairs.map { case (x, y) =>
      val species   = x.getString(5)
      val xFeatures = (1 to 4).map(x.getDouble)
      val yFeatures = (1 to 4).map(y.getDouble)
      (y.getInt(0), (species, euclideanDistance(xFeatures, yFeatures)))
    }

    // For each query point, vote among its K nearest neighbours.
    // collect() brings the (small) result to the driver so the println
    // output is visible and ordered; RDD.foreach would print on executors.
    distances
      .groupByKey()
      .mapValues(majoritySpecies(_, k))
      .collect()
      .sortBy(_._1)
      .foreach { case (id, species) => println(s"id:$id,种类：$species") }

    spark.stop()
  }
}
