package ds_industry_2025.industry.gy_09.T5

import org.apache.spark.ml.Pipeline
import org.apache.spark.ml.classification.RandomForestClassifier
import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator
import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.ml.tuning.{CrossValidator, ParamGridBuilder}
import org.apache.spark.sql.SparkSession
/*
    1、根据子任务一的结果，建立随机森林（随机森林相关参数可自定义，不做限制），使用子任务一的结果训练随机森林模型，然后再将hudi
    中dwd.fact_machine_learning_data_test（该表字段含义与dwd.fact_machine_learning_data表相同，machine_record_state
    列值为空，表结构自行查看）转成向量，预测其是否报警将结果输出到MySQL数据库shtd_industry中的ml_result表中（表结构如下）。在Linux的MySQL命令行中查询出machine_record_id为1、8、20、28和36的5条数据，将SQL语句复制并粘贴至客户端桌面【Release\任务C提交结果.docx】中对应的任务序号下，将执行结果截图粘贴至客户端桌面【Release\任务C提交结果.docx】中对应的任务序号下。
ml_result表结构：
 */
object t1 {
  /**
   * Trains a random-forest alarm classifier on dwd.fact_machine_learning_data
   * (label column = machine_record_state) using a cross-validated grid search,
   * then prints predictions for a held-out 20% split.
   *
   * NOTE(review): the task description above also requires predicting on
   * dwd.fact_machine_learning_data_test and writing the results to the MySQL
   * table shtd_industry.ml_result — that step is not implemented here yet.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("t1")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      // FIX: the extension class name is case-sensitive; the original value
      // "...hoodieSparkSessionExtension" (lowercase 'h') cannot be class-loaded,
      // so the Hudi extension was silently never installed.
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    // RandomForestClassifier reads the target from a column named "label" by default.
    val source = spark.table("dwd.fact_machine_learning_data")
      .withColumnRenamed("machine_record_state", "label")

    // Feature columns: drop the first 3 id/bookkeeping columns and the last 5
    // non-feature columns. NOTE(review): this assumes a fixed column order in
    // the Hive table — confirm against the actual table schema.
    val featureCols = source.columns.slice(3, source.columns.length - 5)

    // Fixed seed so the 80/20 train/test split is reproducible across runs.
    val Array(training, test) = source.randomSplit(Array(0.8, 0.2), seed = 42L)

    val assembler = new VectorAssembler()
      .setInputCols(featureCols)
      .setOutputCol("features")

    val classifier = new RandomForestClassifier()

    val pipeline = new Pipeline()
      .setStages(Array(assembler, classifier))

    // Hyper-parameter grid: 3 x 3 x 2 = 18 candidate models.
    val paramGrid = new ParamGridBuilder()
      .addGrid(classifier.numTrees, Array(10, 20, 30))
      .addGrid(classifier.maxDepth, Array(5, 10, 15))
      .addGrid(classifier.impurity, Array("entropy", "gini"))
      .build()

    // CrossValidator defaults to 3 folds; the evaluator defaults to the f1
    // metric on the "label"/"prediction" columns, which match our pipeline.
    val cv = new CrossValidator()
      .setEstimator(pipeline)
      .setEstimatorParamMaps(paramGrid)
      .setEvaluator(new MulticlassClassificationEvaluator())
      .setParallelism(4) // evaluate up to 4 candidate models concurrently

    // Fit: selects the best of the 18 candidates by cross-validated metric.
    val model = cv.fit(training)

    // Predict on the held-out split; rename the prediction back to the
    // business column name expected by the ml_result output table.
    val result = model.transform(test)
      .select("machine_record_id", "prediction")
      .withColumnRenamed("prediction", "machine_record_state")

    result.show()

    spark.close()
  }

}
