import org.apache.spark.ml.classification.RandomForestClassifier
import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator
import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._

import scala.collection.mutable.ListBuffer

object t1 {

  // Hudi metadata + audit columns and the label/date columns: everything that
  // must NOT be fed to the VectorAssembler as a feature.
  private val NonFeatureColumns: Seq[String] = Seq(
    "_hoodie_commit_time", "_hoodie_commit_seqno", "_hoodie_record_key",
    "_hoodie_partition_path", "_hoodie_file_name",
    "dwd_insert_user", "dwd_modify_user", "dwd_insert_time", "dwd_modify_time",
    "machine_record_state", "machine_record_date"
  )

  /**
   * Tunes a RandomForestClassifier (numTrees, minInstancesPerNode,
   * featureSubsetStrategy) on the dwd machine-learning tables, printing the
   * accuracy of each configuration and of the final tuned model.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local").getOrCreate()

    try {
      val train = spark.table("dwd.fact_machine_learning_data")
      val test = spark.read.format("hudi")
        .load("hdfs://192.168.23.60:9000/user/hive/warehouse/dwd.db/fact_machine_learning_data_test")

      // Every remaining column of the training table becomes a feature.
      val assembler = new VectorAssembler()
        .setInputCols(train.drop(NonFeatureColumns: _*).columns)
        .setOutputCol("features")

      // Assemble once; the feature vectors are invariant across all trials.
      val trainVec = assembler.transform(train)
      val testVec = assembler.transform(test)

      val evaluator = new MulticlassClassificationEvaluator()
        .setLabelCol("machine_record_state")
        .setMetricName("accuracy")

      // Fresh classifier with the fixed label column and seed; each trial
      // then overrides the hyper-parameter under study.
      def baseClassifier(): RandomForestClassifier =
        new RandomForestClassifier()
          .setLabelCol("machine_record_state")
          .setSeed(1L)

      // Fit on train, score on test, return test-set accuracy.
      def accuracyOf(rfc: RandomForestClassifier): Double =
        evaluator.evaluate(rfc.fit(trainVec).transform(testVec))

      // --- Tune numTrees -------------------------------------------------
      val numTreesResults = (5 to 200 by 10).map { n =>
        n -> accuracyOf(baseClassifier().setNumTrees(n))
      }
      numTreesResults.sortBy(_._2).foreach(println)
      // Observed best: 12 trees, accuracy 0.6752958579881657

      // --- Tune minInstancesPerNode (min leaf size) ----------------------
      val minLeafResults = (1 to 20).map { minLeaf =>
        minLeaf -> accuracyOf(
          baseClassifier().setNumTrees(12).setMinInstancesPerNode(minLeaf))
      }
      minLeafResults.sortBy(_._2).foreach(println)
      // Accuracy did not change, so the default is kept.

      // --- Tune featureSubsetStrategy ------------------------------------
      // NOTE: "n" was removed — it is not a supported strategy (valid values
      // are auto/all/onethird/sqrt/log2 or a numeric string) and made
      // setFeatureSubsetStrategy throw IllegalArgumentException.
      val strategyResults = List("auto", "all", "onethird", "sqrt", "log2").map { strategy =>
        strategy -> accuracyOf(
          baseClassifier().setNumTrees(12).setFeatureSubsetStrategy(strategy))
      }
      strategyResults.sortBy(_._2).foreach(println)
      // Observed best: "all", accuracy 0.8402366863905325

      // --- Final model with the tuned parameters -------------------------
      val finalAccuracy = accuracyOf(
        baseClassifier().setNumTrees(12).setFeatureSubsetStrategy("all"))
      // Previously this value was computed but never reported.
      println(s"final model accuracy = $finalAccuracy")
    } finally {
      spark.stop() // release the local Spark context even if a trial fails
    }
  }
}
