package com.lpssfxy.offline

import breeze.numerics.sqrt
import com.lpssfxy.offline.utils.AppUtils
import org.apache.spark.mllib.recommendation.{ALS, MatrixFactorizationModel, Rating}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

import scala.util.control.NonFatal

object ALSModelEvaluation {

  private val SPLIT_SEED = 42L // 定义随机分割的种子，确保结果可复现

  def main(args: Array[String]): Unit = {
    var spark: SparkSession = null
    try {
      // 1. Create the SparkSession (app name + core count come from the shared AppUtils helper).
      spark = AppUtils.createSparkSession("ALSModelEvaluation", AppUtils.getSparkCores)
      // 2. Load the raw (user, product, rating) tuples and wrap them in MLlib's Rating case class.
      val ratingRDD = AppUtils.loadRatingData(spark).map(x => Rating(x._1, x._2, x._3))
      // 3. Cache ratingRDD: randomSplit below evaluates it more than once.
      ratingRDD.cache()
      // 4. Randomly split 80% train / 20% test; SPLIT_SEED keeps the split reproducible across runs.
      val Array(trainRDD, testRDD) = ratingRDD.randomSplit(Array(0.8, 0.2), SPLIT_SEED)
      // 5. Cache both splits: the grid search re-reads them for every parameter combination.
      trainRDD.cache()
      testRDD.cache()
      // 6. Grid-search the ALS hyper-parameters against the held-out test set.
      val bestParams = adjustALSModelParams(trainRDD, testRDD)
      // 7. Report the winning combination.
      println(s"Best parameters: rank = ${bestParams._1}, iterations = ${bestParams._2}, lambda = ${bestParams._3}, RMSE = ${bestParams._4}")

      // 8. Retrain with the best parameters and show sample predictions on the test set.
      val model = ALS.train(trainRDD, bestParams._1, bestParams._2, bestParams._3)
      val predictRdd = model.predict(testRDD.map(x => (x.user, x.product)))
      predictRdd.foreach(x => println(s"调整参数之后的预测结果：${x}"))

    } catch {
      // NonFatal lets OutOfMemoryError / InterruptedException etc. propagate.
      // Note: the original `println(msg, e)` auto-tupled its arguments and printed a
      // (String, Exception) pair instead of the actual error details.
      case NonFatal(e) =>
        System.err.println(s"Error occurred during ALS model evaluation: ${e.getMessage}")
        e.printStackTrace()
    } finally {
      if (spark != null) {
        spark.stop() // Always close the SparkSession to release cluster resources.
      }
    }
  }

  /**
   * Grid-searches ALS hyper-parameters (rank, iterations, lambda) and returns the
   * combination achieving the lowest RMSE on the held-out test set.
   *
   * Each combination is retried up to 3 times, so a transient training failure does
   * not eliminate an otherwise viable combination.
   *
   * @param trainRDD ratings used to train each candidate model
   * @param testRDD  held-out ratings used to score each candidate model
   * @return (rank, iterations, lambda, rmse) of the best combination
   * @throws IllegalStateException if every parameter combination failed to train
   */
  private def adjustALSModelParams(trainRDD: RDD[Rating], testRDD: RDD[Rating]): (Int, Int, Double, Double) = {
    // Candidate values for each hyper-parameter.
    val rankOptions = Array(5, 10, 20, 50)
    val iterationOptions = Array(5, 10, 15, 20)
    val lambdaOptions = Array(1.0, 0.1, 0.01)
    // Cartesian product of all candidate values (4 * 4 * 3 = 48 combinations).
    val paramCombinations = for {
      rank <- rankOptions
      iterations <- iterationOptions
      lambda <- lambdaOptions
    } yield (rank, iterations, lambda)

    // Train and score every combination.
    val results = paramCombinations.map { case (rank, iterations, lambda) =>
      var attempt = 0
      var rmse = Double.MaxValue // sentinel: stays MaxValue if every attempt fails
      var succeeded = false      // explicit success flag instead of comparing rmse to the sentinel
      while (attempt < 3 && !succeeded) {
        try {
          val model = ALS.train(trainRDD, rank, iterations, lambda)
          rmse = getRMSE(model, testRDD)
          succeeded = true
        } catch {
          case NonFatal(e) =>
            attempt += 1 // increment first so the log shows 1-based attempt numbers
            println(s"Failed to train model (attempt $attempt) with rank = $rank, iterations = $iterations, lambda = $lambda: ${e.getMessage}")
        }
      }
      (rank, iterations, lambda, rmse)
    }
    // Drop combinations that never produced an RMSE; the original code let the
    // MaxValue sentinel rows take part in minBy, silently returning meaningless
    // parameters when every combination failed.
    val successful = results.filter(_._4 != Double.MaxValue)
    if (successful.isEmpty) {
      throw new IllegalStateException("All ALS parameter combinations failed to train")
    }
    successful.minBy(_._4)
  }

  /**
   * Computes the root-mean-square error of a trained model against held-out ratings.
   *
   * Only (user, product) pairs the model can actually predict contribute to the
   * result, because ground truth and predictions are combined with an inner join.
   *
   * @param model    the trained matrix-factorization model to score
   * @param testData held-out ratings providing the ground truth
   * @return sqrt of the mean squared prediction error
   */
  private def getRMSE(model: MatrixFactorizationModel, testData: RDD[Rating]): Double = {
    // Keys the model is asked to predict for.
    val userProducts = testData.map(r => (r.user, r.product))
    // Ground truth and predictions, both keyed by (userId, productId); cached
    // because each feeds the join below.
    val actualByKey = testData.map(r => ((r.user, r.product), r.rating)).cache()
    val predictedByKey = model.predict(userProducts).map(r => ((r.user, r.product), r.rating)).cache()
    // Mean of squared errors over the keys present in both RDDs.
    val meanSquaredError = actualByKey
      .join(predictedByKey)
      .map { case (_, (actual, predicted)) =>
        val error = actual - predicted
        error * error
      }
      .mean()
    // Release the cached intermediates before returning.
    actualByKey.unpersist()
    predictedByKey.unpersist()
    sqrt(meanSquaredError)
  }
}