package com.timeriver.machine_learning.multiclassification

import org.apache.spark.ml.{Pipeline, PipelineModel}
import org.apache.spark.ml.classification.RandomForestClassifier
import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator
import org.apache.spark.ml.feature.LabeledPoint
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}

object RandomForestPipeline {

  /** Entry point: trains a random-forest multiclass classifier on the iris
    * dataset (read from a local text file), prints a sample of predictions
    * and the test-set accuracy, then persists the fitted pipeline model.
    */
  def main(args: Array[String]): Unit = {
    val session: SparkSession = SparkSession.builder()
      .master("local[6]")
      .appName("随机森林多分类算法")
      .getOrCreate()

    // try/finally guarantees the SparkSession is released even when
    // loading, training, or saving throws.
    try {
      import session.implicits._

      // Raw CSV lines: sepal-length, sepal-width, petal-length, petal-width, species.
      val iris: Dataset[String] = session.read
        .textFile("D:\\workspace\\gitee_space\\spark-ml-machine-learning\\data\\iris.data")

      val data: Dataset[LabeledPoint] = iris.map(_.trim)
        .filter(_.nonEmpty)
        .map { line =>
          val fields: Array[String] = line.split(",")
          // Encode the species name as a numeric class label; anything
          // other than setosa/versicolor falls through to class 2 (virginica).
          val label: Double = fields(4) match {
            case "Iris-setosa"     => 0.0
            case "Iris-versicolor" => 1.0
            case _                 => 2.0
          }
          LabeledPoint(label, Vectors.dense(
            fields(0).toDouble,
            fields(1).toDouble,
            fields(2).toDouble,
            fields(3).toDouble
          ))
        }

      // Fixed seed keeps the 60/40 train/test split reproducible across runs.
      val Array(train, test) = data.randomSplit(Array(0.6, 0.4), 123)

      // Default input columns ("label", "features") match what LabeledPoint produces.
      val classifier = new RandomForestClassifier()
        .setNumTrees(10)

      val pipeline: Pipeline = new Pipeline().setStages(Array(classifier))

      val model: PipelineModel = pipeline.fit(train)

      val frame: DataFrame = model.transform(test)
      frame.show(5, false)

      val evaluator = new MulticlassClassificationEvaluator()
        .setLabelCol("label")
        .setPredictionCol("prediction")
        .setMetricName("accuracy")

      val accuracy: Double = evaluator.evaluate(frame)
      println(s"accuracy => ${accuracy}")

      // overwrite() so a rerun does not fail because the model directory
      // already exists (plain save() throws in that case).
      model.write.overwrite().save("./model/randomforest")
    } finally {
      session.stop()
    }
  }
}
