package org.spark

import org.apache.log4j.{Level, Logger}
import org.apache.spark.ml.classification.RandomForestClassifier
import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator
import org.apache.spark.ml.feature.{StandardScaler, VectorAssembler}
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._

import scala.util.control.NonFatal

object HumanActivityClassification2 {
  // Application logger.
  val logger: Logger = Logger.getLogger(getClass.getName)

  /** Reads a headered CSV file from `path` into a DataFrame using `schema`. */
  private def loadCsv(spark: SparkSession, path: String, schema: StructType): DataFrame =
    spark.read
      .option("header", "true")
      .schema(schema)
      .csv(path)

  /**
   * Entry point. Expects two arguments: the HDFS path of the training CSV
   * and the HDFS path of the test CSV. Trains a RandomForest classifier on
   * standardized accelerometer features and prints accuracy, F1, a confusion
   * matrix, sample predictions, and feature importances.
   */
  def main(args: Array[String]): Unit = {
    // Reduce framework log noise to warnings only.
    Logger.getLogger("org").setLevel(Level.WARN)
    Logger.getLogger("akka").setLevel(Level.WARN)

    // Validate command-line arguments.
    if (args.length != 2) {
      System.err.println("使用方法: HumanActivityClassification2 <训练集HDFS路径> <测试集HDFS路径>")
      System.exit(1)
    }

    val trainPath = args(0)
    val testPath = args(1)

    // Build the SparkSession; the master URL is supplied via spark-submit.
    val spark = SparkSession.builder()
      .appName("人体活动分类2")
      .getOrCreate()

    println("Spark会话已成功创建。")

    try {
      // Schema of the sensor CSV: timestamp, two 3-axis accelerometers, label.
      val schema = StructType(Array(
        StructField("timestamp", StringType, true),
        StructField("back_x", DoubleType, true),
        StructField("back_y", DoubleType, true),
        StructField("back_z", DoubleType, true),
        StructField("thigh_x", DoubleType, true),
        StructField("thigh_y", DoubleType, true),
        StructField("thigh_z", DoubleType, true),
        StructField("label", DoubleType, true)
      ))

      // Load training data.
      println(s"正在加载训练数据: $trainPath")
      val trainDF = loadCsv(spark, trainPath, schema)

      // Load test data.
      println(s"正在加载测试数据: $testPath")
      val testDF = loadCsv(spark, testPath, schema)

      // Drop rows whose label is missing; the assembler below skips rows
      // with invalid feature values.
      val cleanedTrainDF = trainDF.filter(col("label").isNotNull)
      val cleanedTestDF = testDF.filter(col("label").isNotNull)

      // Feature columns: both 3-axis accelerometer readings.
      val featureColumns = Array("back_x", "back_y", "back_z",
        "thigh_x", "thigh_y", "thigh_z")

      // Assemble the individual feature columns into a single vector column.
      val assembler = new VectorAssembler()
        .setInputCols(featureColumns)
        .setOutputCol("features")
        .setHandleInvalid("skip") // drop rows containing invalid feature values

      println("正在组装特征...")
      val trainAssembled = assembler.transform(cleanedTrainDF)
      val testAssembled = assembler.transform(cleanedTestDF)

      // Standardize features to zero mean / unit variance. The scaler is
      // fitted on the training set only and applied to both sets, so no
      // test-set statistics leak into training.
      val scaler = new StandardScaler()
        .setInputCol("features")
        .setOutputCol("scaledFeatures")
        .setWithStd(true)
        .setWithMean(true)

      println("正在标准化特征...")
      val scalerModel = scaler.fit(trainAssembled)
      val trainScaled = scalerModel.transform(trainAssembled)
      val testScaled = scalerModel.transform(testAssembled)

      // Train the random-forest model. Fixed seed keeps runs reproducible.
      println("正在训练随机森林模型...")
      val rf = new RandomForestClassifier()
        .setLabelCol("label")
        .setFeaturesCol("scaledFeatures")
        .setNumTrees(50) // more trees for better accuracy
        .setMaxDepth(10) // deeper trees
        .setSeed(42)

      val model = rf.fit(trainScaled)

      // Score the test set.
      println("正在进行预测...")
      val predictions = model.transform(testScaled)

      // Evaluate accuracy and F1 on the predictions.
      println("正在评估模型性能...")
      val evaluatorAccuracy = new MulticlassClassificationEvaluator()
        .setLabelCol("label")
        .setPredictionCol("prediction")
        .setMetricName("accuracy")

      val evaluatorF1 = new MulticlassClassificationEvaluator()
        .setLabelCol("label")
        .setPredictionCol("prediction")
        .setMetricName("f1")

      val accuracy = evaluatorAccuracy.evaluate(predictions)
      val f1Score = evaluatorF1.evaluate(predictions)
      println(f"测试集准确率 = ${accuracy * 100}%.2f%%")
      println(f"测试集F1-Score = ${f1Score * 100}%.2f%%")

      // Confusion matrix as (label, prediction, count) rows.
      println("混淆矩阵:")
      predictions.groupBy("label", "prediction").count()
        .orderBy("label", "prediction")
        .show(truncate = false)

      // A few sample predictions for eyeballing.
      println("部分预测结果:")
      predictions.select("label", "prediction", "features")
        .show(5, truncate = false)

      // Per-feature importance as learned by the forest.
      println("特征重要性:")
      val featureImportances = model.featureImportances
      featureColumns.zip(featureImportances.toArray).foreach { case (feature, importance) =>
        println(f"$feature: ${importance}%.4f")
      }

    } catch {
      // NonFatal: let OutOfMemoryError, InterruptedException, etc. propagate
      // untouched; log and re-throw everything recoverable so the job fails
      // with a non-zero exit status.
      case NonFatal(e) =>
        println("出现错误: " + e.getMessage)
        e.printStackTrace()
        throw e
    } finally {
      // Previously the session was never stopped and the JVM was parked with
      // Thread.sleep(Long.MaxValue), which leaked cluster resources and also
      // blocked the re-thrown exception from ever terminating the job.
      spark.stop()
      println("Spark会话已成功关闭。")
    }
  }
}
