package homework5

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Hand-rolled 2-cluster K-Means over the Iris dataset.
 *
 * Phase 1 iteratively refines two centroids from `data/Iris.csv` until they
 * stop moving (or `count` iterations elapse). Phase 2 assigns each row of
 * `data/Iris2.csv` to its nearest centroid and prints the assignment.
 *
 * Bug fixes vs. the original:
 *  - The training loop was `while (true)` exited only via `return`, which
 *    returned from `main` and made the classification phase unreachable.
 *    The loop now exits via a convergence flag so execution continues.
 *  - The classification distance reused `item._2` for the 3rd and 4th
 *    features (copy-paste error); all four dimensions are now used.
 *  - `collectAsMap()` keyed by the data point silently dropped duplicate
 *    rows before printing; `collect()` keeps every row.
 */
object KMeansDemo {

  /** Squared Euclidean distance between two 4-dimensional points. */
  private def sqDist(a: (Double, Double, Double, Double),
                     b: (Double, Double, Double, Double)): Double =
    Math.pow(a._1 - b._1, 2) + Math.pow(a._2 - b._2, 2) +
      Math.pow(a._3 - b._3, 2) + Math.pow(a._4 - b._4, 2)

  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .appName("KMeansDemo")
      .master("local[*]")
      .getOrCreate()

    // Load the training samples; columns 1..4 are the four iris features
    // (column 0 is assumed to be an id — TODO confirm against Iris.csv).
    val df1: DataFrame = spark.read
      .options(Map("header" -> "true", "inferschema" -> "true"))
      .csv("data/Iris.csv")

    val firstRDD: RDD[(Double, Double, Double, Double)] = df1.rdd.map(item =>
      (item.getAs[Double](1), item.getAs[Double](2), item.getAs[Double](3), item.getAs[Double](4))
    )
    // Cached because the RDD is re-evaluated on every training iteration.
    firstRDD.cache()

    // Initial centroids for cluster 1 and cluster 2.
    var first1: (Double, Double, Double, Double) = (5.1, 3.5, 1.4, 0.2)
    var first2: (Double, Double, Double, Double) = (4.4, 2.0, 4.0, 1.5)

    val count = 20 // maximum number of iterations
    val epsilon = 0.0000001 // squared-movement threshold for convergence
    var i = 0
    var converged = false

    while (!converged && i < count) {
      i += 1

      // Assign each sample to the nearest centroid; the trailing 1 is a
      // per-point count so reduceByKey can also tally cluster sizes.
      val resultRDD: RDD[(Int, ((Double, Double, Double, Double), Int))] = firstRDD.map { item =>
        val distance1 = sqDist(item, first1)
        val distance2 = sqDist(item, first2)
        if (distance1 > distance2) (2, (item, 1)) else (1, (item, 1))
      }

      // Per-cluster coordinate sums and counts, for computing the means.
      val resultMap: collection.Map[Int, ((Double, Double, Double, Double), Int)] =
        resultRDD.reduceByKey { (x, y) =>
          ((x._1._1 + y._1._1, x._1._2 + y._1._2, x._1._3 + y._1._3, x._1._4 + y._1._4), x._2 + y._2)
        }.collectAsMap()
      println("----------------------------------")
      println(resultMap.toBuffer)

      // New centroids = per-cluster means.
      // NOTE(review): resultMap(k) throws if a cluster ends up empty; the
      // original code had the same failure mode.
      val (sum1, n1) = resultMap(1)
      val (sum2, n2) = resultMap(2)
      val last1 = (sum1._1 / n1, sum1._2 / n1, sum1._3 / n1, sum1._4 / n1)
      val last2 = (sum2._1 / n2, sum2._2 / n2, sum2._3 / n2, sum2._4 / n2)

      // How far each centroid moved this iteration (squared distance).
      val distance1 = sqDist(last1, first1)
      val distance2 = sqDist(last2, first2)

      println("旧中心点:")
      println(s"$first1 : $first2")
      println("新中心点:")
      println(s"$last1 : $last2")

      // Replace the old centroids with the new ones.
      first1 = last1
      first2 = last2

      if (distance1 <= epsilon && distance2 <= epsilon) {
        converged = true
        println(s"中心值不再变化,退出")
      } else if (i >= count) {
        println(s"计算${count}次退出")
      }
    }
    println(s"两个中心点为:${first1} - ${first2}")

    // Classify unseen data: each row belongs to whichever centroid it is
    // closest to.
    val df2: DataFrame = spark.read
      .options(Map("header" -> "true", "inferschema" -> "true"))
      .csv("data/Iris2.csv")

    val rdd: RDD[(Double, Double, Double, Double)] = df2.rdd.map(item =>
      (item.getAs[Double](1), item.getAs[Double](2), item.getAs[Double](3), item.getAs[Double](4))
    )

    // FIX: the original computed these distances with item._2 in place of
    // item._3 and item._4; sqDist uses all four feature dimensions.
    rdd.map { item =>
      val distance1 = sqDist(item, first1)
      val distance2 = sqDist(item, first2)
      if (distance1 > distance2) (item, 2) else (item, 1)
    }.collect().foreach { case (point, cluster) => println((point, cluster)) }

    spark.stop()
  }
}
