package task5

import org.apache.commons.lang.math.NumberUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object KMeansImplement {

  /**
   * K-means (Lloyd's algorithm) over the Iris data set, implemented directly
   * on RDDs rather than via MLlib.
   *
   * The first program argument, if present, overrides the default input path.
   * Input rows are expected to carry one leading numeric id column followed by
   * four numeric features; non-numeric fields (CSV header, species label) are
   * discarded before parsing.
   */
  def main(args: Array[String]): Unit = {
    // Set up the Spark environment (local mode, all cores).
    val conf = new SparkConf()
      .setAppName(s"${this.getClass.getCanonicalName}")
      .setMaster("local[*]")
    val sc = new SparkContext(conf)
    sc.setLogLevel("ERROR")

    // Input path: allow overriding via the first CLI argument; the original
    // hard-coded path remains the default for backward compatibility.
    val inputPath = args.headOption
      .getOrElse("file:///D:\\projects\\spark_homework\\data\\Iris.csv")

    // Parse: keep only numeric fields per row, drop rows that do not yield at
    // least 5 numeric fields (guards against the header AND against short rows
    // that would previously crash at x(4)), then build Flower records from
    // columns 1..4 (column 0 is the numeric row id).
    // cache() because this RDD is re-scanned on every iteration below.
    val data = sc.textFile(inputPath)
      .map(_.split(",").filter(NumberUtils.isNumber).map(_.toDouble))
      .filter(_.length >= 5)
      .map(x => Flower(x(1), x(2), x(3), x(4)))
      .cache()

    // Iteration state.
    var i = 0                                // iteration counter
    var moveDist: Double = Double.MaxValue   // total center movement in the last step
    val minMoveDist = 0.001                  // convergence threshold (constant — was a var)
    val N = 3                                // number of clusters
    var result: RDD[(Flower, Int)] = null    // final assignment: (point, cluster id)

    // Pick N distinct random points as initial centers, tagged with their
    // cluster index. Fail fast with a clear message on insufficient data
    // (otherwise minBy below throws a cryptic UnsupportedOperationException).
    var initCenters: Array[(Flower, Int)] =
      data.takeSample(withReplacement = false, N).zipWithIndex
    require(initCenters.length == N,
      s"need at least $N data points to seed $N clusters, got ${initCenters.length}")

    // Lloyd's algorithm: assign each point to its nearest center, recompute
    // centers as cluster means, stop when the centers barely move (or after
    // 1000 iterations as a safety cap).
    while (i < 1000 && moveDist >= minMoveDist) {
      println(s"====================$i 循环， 移动距离为 $moveDist ===========================")
      val bdCenters = sc.broadcast(initCenters)
      // Driver-side lookup table: cluster id -> center of the previous step.
      val oldCenters = bdCenters.value.map { case (f, id) => (id, f) }.toMap

      // Assignment step. distance() is *squared* Euclidean distance, which is
      // monotone in the true distance, so the argmin is identical.
      val clustered: RDD[(Int, (Flower, Int))] = data.map { f =>
        val nearest = bdCenters.value
          .map { case (center, id) => (id, center.distance(f)) }
          .minBy(_._2)._1
        (nearest, (f, 1))
      }

      // Update step: per cluster, sum the points and their count, then average.
      // NOTE(review): a cluster that captures no points silently disappears
      // here (same as the original) — acceptable for this homework data set.
      val newCenters: Array[(Flower, Int)] = clustered
        .reduceByKey { case ((fa, ca), (fb, cb)) => (fa.add(fb), ca + cb) }
        .mapValues { case (f, c) => Flower(f.a / c, f.b / c, f.c / c, f.d / c) }
        .map { case (id, f) => (f, id) }
        .collect()

      println("*****新中心点如下*****")
      newCenters.foreach(println)
      println("**********************")

      // Current assignment, reshaped to (point, cluster id).
      result = clustered.map { case (id, (f, _)) => (f, id) }

      // Total movement of all centers since the previous iteration.
      moveDist = newCenters.map { case (f, id) => oldCenters(id).distance(f) }.sum

      // Advance to the next iteration.
      initCenters = newCenters
      i += 1
    }

    // collect() first so the output is printed on the driver — RDD.foreach
    // runs on the executors and would print nothing here in cluster mode.
    result.collect().foreach(println)

    sc.stop()
  }

  /**
   * A four-feature data point. Field names are positional; the Iris semantics
   * (sepal length/width, petal length/width) come from the input file and are
   * not enforced here.
   */
  final case class Flower(a: Double, b: Double, c: Double, d: Double) {

    /**
     * Squared Euclidean distance to `other`. The square root is deliberately
     * omitted: sqrt is monotone, so nearest-center comparisons are unaffected
     * and the convergence threshold is interpreted in squared units.
     */
    def distance(other: Flower): Double =
      math.pow(a - other.a, 2) + math.pow(b - other.b, 2) +
        math.pow(c - other.c, 2) + math.pow(d - other.d, 2)

    /** Component-wise sum; used to accumulate cluster members before averaging. */
    def add(other: Flower): Flower =
      Flower(a + other.a, b + other.b, c + other.c, d + other.d)
  }

}
