package com.timeriver.machine_learning.clustering

import org.apache.spark.ml.clustering.{KMeans, KMeansModel}
import org.apache.spark.ml.feature.LabeledPoint
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}

/**
 * Trains a KMeans clustering model on the Iris data set and reports how
 * often the predicted cluster id equals the encoded species label.
 */
object KmeansAlg {
  def main(args: Array[String]): Unit = {

    val session: SparkSession = SparkSession.builder()
      .appName("Kmeans聚类算法模型训练")
      .master("local[6]")
      .getOrCreate()

    import session.implicits._

    // Raw CSV lines: sepal-length, sepal-width, petal-length, petal-width, species.
    val iris: Dataset[String] = session.read
      .textFile("D:\\workspace\\gitee_space\\spark-ml-machine-learning\\data\\iris.data")
    iris.show(5, false)

    val data: Dataset[LabeledPoint] = iris.map(_.trim)
      .filter(_.nonEmpty) // the iris.data file commonly ends with blank lines
      .map { line =>
        val fields: Array[String] = line.split(",")
        // Encode the species name as a numeric label. A match expression
        // replaces the original if/else-with-.equals chain; any unexpected
        // species falls through to 2, matching the original behavior.
        val label: Int = fields(4) match {
          case "Iris-setosa"     => 0
          case "Iris-versicolor" => 1
          case _                 => 2
        }
        LabeledPoint(label, Vectors.dense(
          fields(0).toDouble,
          fields(1).toDouble,
          fields(2).toDouble,
          fields(3).toDouble
        ))
      }

    // Fixed seed (123) so the 60/40 split is reproducible between runs.
    val Array(train, test) = data.randomSplit(Array(0.6, 0.4), 123)

    val kmeans: KMeans = new KMeans()
      .setMaxIter(20)
      .setK(3)

    val model: KMeansModel = kmeans.fit(train)

    // Appends a "prediction" column (the assigned cluster id) to the test set.
    val frame: DataFrame = model.transform(test)

    frame.show(5, false)

    // NOTE(review): KMeans cluster ids are arbitrary — cluster 0 is not
    // guaranteed to correspond to label 0, so this "accuracy" is only
    // meaningful when the learned clusters happen to align with the label
    // encoding. Consider mapping each cluster to its majority label first.
    // Compare as Int to avoid cross-type Double == Int equality.
    val success: Long = frame.filter(row =>
      row.getAs[Double]("label").toInt == row.getAs[Int]("prediction")
    ).count()
    val total: Long = frame.count()

    println(s"成功数量：${success}，总数量：${total}。预测成功率：${(success.toDouble / total.toDouble) * 100}%")

    session.stop()
  }
}
