package com.example

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Dataset, SparkSession}

/**
 * One record of the iris data set: the four measurements plus a cluster
 * label (`kind`, -1 = unassigned). `kind` is a `var` because the driver
 * relabels the sampled centers in place.
 */
case class Iris2(sepalLengthCm: Double,
                 sepalWidthCm: Double,
                 petalLengthCm: Double,
                 petalWidthCm: Double,
                 var kind: Int)

object Test5 {

  /**
   * Naive K-Means clustering of the Iris data set on Spark.
   *
   * Reads `data/Iris2.csv`, picks k random initial centers, then iterates:
   * assign every point to its nearest center, recompute each center as the
   * per-cluster mean (via Spark SQL), and stop once the total movement of
   * the centers drops below 0.01. The final assignment is printed.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .master("local[*]")
      .appName(this.getClass.getCanonicalName)
      .getOrCreate()
    val sc = spark.sparkContext
    sc.setLogLevel("warn")
    // Number of clusters; everything below derives its bounds from k instead
    // of hard-coding 3, so changing k here is now sufficient.
    val k: Int = 3
    import spark.implicits._

    // Read the file and mark every record as unassigned (kind = -1).
    val irisRdd: RDD[Iris2] = sc.textFile("data/Iris2.csv")
      .map { line =>
        val cols: Array[String] = line.split(",")
        Iris2(cols(0).toDouble, cols(1).toDouble,
          cols(2).toDouble, cols(3).toDouble, -1)
      }

    // Randomly pick k initial cluster centers and label them 0 .. k-1.
    var oldCenters: Array[Iris2] = irisRdd.takeSample(withReplacement = false, num = k)
    oldCenters.zipWithIndex.foreach { case (center, idx) => center.kind = idx }
    var newCenters: Array[Iris2] = Array.empty[Iris2]

    println("随机选取 k个初始聚类中心:")
    oldCenters.foreach(println(_))

    println("===================")

    var flag = true

    do {
      // Assign every point to the nearest of the current centers.
      // Capture the centers in a val so the closure serializes a stable snapshot.
      val currentCenters = oldCenters
      val tmpRdd: RDD[Iris2] = irisRdd.map { item =>
        val distances: Array[Double] = currentCenters.map(c => getDistance(item, c))
        Iris2(item.sepalLengthCm, item.sepalWidthCm,
          item.petalLengthCm, item.petalWidthCm, distances.indexOf(distances.min))
      }
      // Convert the RDD to a Dataset so the new centers can be computed with SQL.
      val irisDs: Dataset[Iris2] = tmpRdd.toDS()
      irisDs.createOrReplaceTempView("t1")
      // Recompute each center as the mean of its cluster.
      // NOTE(review): `order by kind` aligns newCenters(i) with cluster i, which
      // assumes no cluster ever becomes empty — confirm this holds for the data/k.
      newCenters = spark.sql(
        """
          |select
          |  avg(sepalLengthCm) sepalLengthCm, avg(sepalWidthCm) sepalWidthCm,
          |  avg(petalLengthCm) petalLengthCm, avg(petalWidthCm) petalWidthCm, kind
          |from
          |t1
          |group by kind
          |order by kind
          |""".stripMargin).as[Iris2].collect()

      // Total movement of the centers between two consecutive iterations.
      val distance: Double =
        oldCenters.indices.map(i => getDistance(oldCenters(i), newCenters(i))).sum

      // Centers moved less than 0.01 in total: treat as converged and stop;
      // otherwise iterate again from the freshly computed centers.
      if (distance < 0.01)
        flag = false
      else
        oldCenters = newCenters
    } while (flag)

    // Final assignment of every point against the converged centers.
    val resultRdd: RDD[Iris2] = irisRdd.map { item =>
      val distances: Array[Double] = newCenters.map(c => getDistance(item, c))
      Iris2(item.sepalLengthCm, item.sepalWidthCm,
        item.petalLengthCm, item.petalWidthCm, distances.indexOf(distances.min))
    }

    // Show the final clustering result.
    resultRdd.toDS().createOrReplaceTempView("t2")
    spark.sql("""select * from t2 order by kind""").show(150)

    sc.stop()
  }

  /**
   * Euclidean distance between two iris records over the four measurements.
   *
   * Fix: the original summed `square(x.petalLengthCm - y.sepalLengthCm)` —
   * mismatched fields — omitted `sepalLengthCm` of `x` entirely, and counted
   * `petalLengthCm` twice. Each field now pairs with its own counterpart.
   */
  def getDistance(x: Iris2, y: Iris2): Double =
    math.sqrt(
      square(x.sepalLengthCm - y.sepalLengthCm)
        + square(x.sepalWidthCm - y.sepalWidthCm)
        + square(x.petalLengthCm - y.petalLengthCm)
        + square(x.petalWidthCm - y.petalWidthCm))

  /** The square of x. */
  def square(x: Double): Double = x * x

}
