package task4

import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * K-nearest-neighbours classifier for the Iris dataset, run on Spark.
 *
 * Reads the Iris CSV (columns: Id, SepalLengthCm, SepalWidthCm,
 * PetalLengthCm, PetalWidthCm, Species), finds the N training rows closest
 * (Euclidean distance) to a fixed test sample, and prints the majority
 * species among them.
 */
object KNNImplement {

  /** A test sample: the four Iris measurements, in cm. */
  case class TestData(SepalLengthCm: Double, SepalWidthCm: Double, PetalLengthCm: Double, PetalWidthCm: Double)

  /** Number of nearest neighbours to vote over. */
  private val N = 5

  /** Default Iris CSV location; can be overridden by the first CLI argument. */
  private val DefaultPath = "file:///D:\\projects\\spark_homework\\data\\Iris.csv"

  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .master("local[*]")
      .appName(s"${this.getClass.getCanonicalName}")
      .getOrCreate()

    val sc = spark.sparkContext
    sc.setLogLevel("WARN")

    // Generalized: take the data path from the command line when given,
    // falling back to the original hard-coded location so existing
    // invocations keep working.
    val path = args.headOption.getOrElse(DefaultPath)

    val training: DataFrame = spark.read
      .format("csv")
      .option("header", "true")
      .option("inferSchema", "true") // option keys are case-insensitive; normalized spelling
      .load(path)

    val test = TestData(5.1, 3.2, 1.4, 0.1)

    // Broadcast the test sample so each executor gets one read-only copy.
    val bd = sc.broadcast(test)

    // Keep only the N closest (species, distance) pairs while scanning each
    // partition, then merge the per-partition candidate lists.
    val topN: List[(String, Double)] = training.rdd.aggregate(List.empty[(String, Double)])(
      (ls, row) => {
        val t = bd.value
        // Column layout: 0 = Id, 1..4 = measurements, 5 = Species.
        val d1 = row.getDouble(1) - t.SepalLengthCm
        val d2 = row.getDouble(2) - t.SepalWidthCm
        val d3 = row.getDouble(3) - t.PetalLengthCm
        val d4 = row.getDouble(4) - t.PetalWidthCm
        val distance = Math.sqrt(d1 * d1 + d2 * d2 + d3 * d3 + d4 * d4)
        // Prepend (O(1)) instead of append (O(n)); the sortBy that follows
        // restores distance order before truncating to N.
        ((row.getString(5), distance) :: ls).sortBy(_._2).take(N)
      },
      (ls1, ls2) => (ls1 ::: ls2).sortBy(_._2).take(N)
    )

    // Majority vote over the N neighbours. topN holds at most N elements,
    // so count locally instead of round-tripping through another RDD
    // (the original makeRDD/reduceByKey/sortBy pipeline).
    val votes: Map[String, Int] =
      topN.groupBy(_._1).map { case (species, hits) => (species, hits.size) }

    // Guard against an empty training set instead of crashing with an
    // index error (the original `predict.toBuffer(0)` would throw).
    if (votes.isEmpty)
      println("No prediction: training data was empty")
    else
      println(votes.maxBy(_._2)._1)

    spark.stop()
  }
}
