package com.lpssfxy.offline.optimizer

import com.lpssfxy.offline.entities.{ProductRecs, Recommendation}
import com.lpssfxy.offline.utils.AppUtils
import org.apache.spark.sql
import org.apache.spark.sql.SparkSession
import org.jblas.DoubleMatrix
import org.apache.spark.ml.tuning.{ParamGridBuilder, CrossValidator}
import org.apache.spark.ml.recommendation.ALS
import org.apache.spark.ml.evaluation.RegressionEvaluator
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.functions._

object CalculateProductSimRecList {

  def main(args: Array[String]): Unit = {
    // Silence noisy Spark/Hadoop logging; only errors are shown.
    Logger.getLogger("org").setLevel(Level.ERROR)

    // Wall-clock start used to report total runtime at the end.
    val startedAt = System.currentTimeMillis()

    // One SparkSession shared by the whole pipeline.
    val spark = AppUtils.createSparkSession("CalculateProductSimRecList", AppUtils.getSparkCores)
    import spark.implicits._

    // Load raw ratings and drop low-quality records (rating below 1).
    val qualifiedRatings = AppUtils.loadRatingData(spark).filter { case (_, _, rating) => rating >= 1 }

    // 80/20 random split into training and hold-out test sets.
    val splits = qualifiedRatings.toDF("userId", "productId", "rating").randomSplit(Array(0.8, 0.2))
    val training = splits(0)
    val test = splits(1)

    // Train ALS, derive per-product similarity lists, and report the model's RMSE.
    val (productRecsDF, rmse) = calculateProductRecommendations(spark, training, test)
    println(s"模型均方根误差 (RMSE): $rmse")

    // Accuracy check from the user dimension (hit rate against the test set).
    val userHitRate = validateRecommendationAccuracy(spark, productRecsDF, test)
    println(s"推荐结果的用户命中率: $userHitRate")

    // Accuracy check from the product dimension (precision and mean similarity).
    val (precision, avgSimilarity) = validateProductRecommendationAccuracy(spark, productRecsDF)
    println(s"推荐结果的商品精准率: $precision")
    println(s"推荐列表的平均相似度: $avgSimilarity")

    // Persist the recommendation lists to MongoDB before shutting down.
    AppUtils.saveRecommendationsToMongoDB(productRecsDF, AppUtils.MONGODB_PRODUCT_RECS_COLLECTION)

    spark.stop()

    // Report total elapsed time in milliseconds.
    val elapsedTime = System.currentTimeMillis() - startedAt
    println(s"程序执行耗时: ${elapsedTime} 毫秒")
  }

  /**
   * Computes the cosine similarity between two product feature vectors.
   *
   * @param product1 latent-factor vector of the first product
   * @param product2 latent-factor vector of the second product
   * @return cosine similarity; 0.0 when either vector has zero norm
   */
  private def cosSim(product1: DoubleMatrix, product2: DoubleMatrix): Double = {
    val denom = product1.norm2() * product2.norm2()
    // Guard against division by zero: an all-zero factor vector would otherwise
    // produce NaN, which silently corrupts the downstream filter/sort by score.
    if (denom == 0.0) 0.0 else product1.dot(product2) / denom
  }

  /**
   * Trains an ALS model with cross-validated hyper-parameters and derives, for every
   * product, a list of the most similar other products based on the learned item factors.
   *
   * @param spark        active SparkSession
   * @param training     training ratings (columns: userId, productId, rating)
   * @param test         hold-out ratings used to compute the RMSE
   * @param simThreshold minimum cosine similarity for a product to be kept (default 0.6)
   * @param maxRecs      maximum number of similar products kept per product (default 30)
   * @return (DataFrame of ProductRecs rows, RMSE of the best model on the test set)
   */
  private def calculateProductRecommendations(spark: SparkSession, training: sql.DataFrame, test: sql.DataFrame,
                                              simThreshold: Double = 0.6, maxRecs: Int = 30): (sql.DataFrame, Double) = {
    import spark.implicits._

    // ALS estimator; "drop" removes rows with NaN predictions for users/items
    // unseen during training, so the evaluator is not skewed by them.
    val als = new ALS()
      .setUserCol("userId")
      .setItemCol("productId")
      .setRatingCol("rating")
      .setColdStartStrategy("drop")

    // Hyper-parameter grid searched by cross-validation (3 x 3 x 3 = 27 candidates).
    val paramGrid = new ParamGridBuilder()
      .addGrid(als.rank, Array(5, 10, 15))
      .addGrid(als.maxIter, Array(10, 15, 20))
      .addGrid(als.regParam, Array(0.01, 0.1, 0.5))
      .build()

    // 3-fold cross-validation; RegressionEvaluator's default metric is RMSE.
    val crossValidator = new CrossValidator()
      .setEstimator(als)
      .setEvaluator(new RegressionEvaluator().setLabelCol("rating"))
      .setEstimatorParamMaps(paramGrid)
      .setNumFolds(3)

    // Fit the grid and keep only the best ALS model.
    val model = crossValidator.fit(training).bestModel.asInstanceOf[org.apache.spark.ml.recommendation.ALSModel]

    // Score the hold-out set with the best model.
    val predictions = model.transform(test)

    // Evaluate the best model on the hold-out set.
    val evaluator = new RegressionEvaluator()
      .setMetricName("rmse")
      .setLabelCol("rating")
      .setPredictionCol("prediction")
    val rmse = evaluator.evaluate(predictions)

    // Item latent factors -> (productId, factor vector as a jblas DoubleMatrix).
    // itemFactors stores features as Float; convert to Double for jblas.
    val productFeatures = model.itemFactors.rdd.map { row =>
      val productId = row.getAs[Int]("id")
      val floatFeatures = row.getAs[scala.collection.mutable.WrappedArray[Float]]("features")
      (productId, new DoubleMatrix(floatFeatures.map(_.toDouble).toArray))
    }

    // Broadcast all item factors so every partition can compare its products
    // against the full catalogue without a shuffle-heavy cartesian join.
    val broadcastProductFeatures = spark.sparkContext.broadcast(productFeatures.collect())

    // For each product keep the top `maxRecs` other products whose cosine
    // similarity exceeds `simThreshold`, ordered by descending similarity.
    val productRecs = productFeatures.map { case (productId, features) =>
      val simScores = broadcastProductFeatures.value
        .filter(_._1 != productId) // never recommend a product to itself
        .map { case (otherProductId, otherFeatures) =>
          (otherProductId, cosSim(features, otherFeatures))
        }
        .filter(_._2 > simThreshold)
        .sortBy(-_._2)
        .take(maxRecs)
      ProductRecs(productId, simScores.map { case (id, score) => Recommendation(id, score) })
    }

    (productRecs.toDF(), rmse)
  }

  /**
   * Validates recommendation accuracy from the user dimension: for each user in the
   * test set, checks whether any product they actually rated appears in a
   * recommendation list, and returns the fraction of test users with at least one hit.
   *
   * @param spark         active SparkSession
   * @param productRecsDF product similarity recommendation lists (productId, recs)
   * @param test          hold-out ratings (userId, productId, rating)
   * @return fraction of test users whose rated products intersect a recommendation list
   */
  private def validateRecommendationAccuracy(spark: SparkSession, productRecsDF: sql.DataFrame, test: sql.DataFrame): Double = {
    import spark.implicits._

    // Per user: the set of products they actually rated in the test split.
    val testGrouped = test.groupBy("userId").agg(collect_set("productId").as("actualProducts"))

    // NOTE(review): productId is aliased to userId here, i.e. product-similarity
    // lists are joined against users by treating product ids as user ids. Confirm
    // this is intentional — it only makes sense if the two id spaces coincide.
    val recGrouped = productRecsDF.select($"productId".as("userId"), explode($"recs.productId").as("recommendedProduct"))
      .groupBy("userId").agg(collect_set("recommendedProduct").as("recommendedProducts"))

    // Inner join: only users present on both sides can contribute a hit.
    val joined = testGrouped.join(recGrouped, Seq("userId"))

    // A user counts as a hit when at least one rated product appears in the recommendations.
    val hitCount = joined.filter { row =>
      val actualProducts = row.getAs[Seq[Int]]("actualProducts")
      val recommendedProducts = row.getAs[Seq[Int]]("recommendedProducts")
      actualProducts.intersect(recommendedProducts).nonEmpty
    }.count()

    // Denominator is ALL test users, including those with no recommendation row at all.
    val totalCount = testGrouped.count()

    // Hit rate = users with at least one hit / total test users.
    hitCount.toDouble / totalCount.toDouble
  }

  /**
   * Validates recommendation accuracy from the product dimension by comparing each
   * product's recommendation list against the ground-truth related products stored
   * in the MongoDB collection "relatedProductSimList".
   *
   * Precision = fraction of recommended (productId, recommendedProductId) pairs that
   * also appear in the real related-product list of that product.
   *
   * @param spark         active SparkSession
   * @param productRecsDF product recommendation lists (productId, recs[productId, score])
   * @return (precision of the recommendations, average similarity of all recommendations)
   */
  private def validateProductRecommendationAccuracy(spark: SparkSession, productRecsDF: sql.DataFrame): (Double, Double) = {
    import spark.implicits._

    // Ground-truth related products loaded from MongoDB.
    val realRelatedProducts = spark.read
      .option("uri", AppUtils.getMongoUri)
      .option("collection", "relatedProductSimList")
      .format("com.mongodb.spark.sql")
      .load()

    // One row per recommended product, keeping its similarity score.
    val expandedRecs = productRecsDF.select($"productId", explode($"recs").as("rec"))
      .select($"productId", $"rec.productId".as("recommendedProductId"), $"rec.score".as("similarity"))

    // Collapse the ground truth to ONE set of related product ids per product.
    // Joining the exploded form directly would pair every recommendation with every
    // real related product of the same productId, counting a true hit as a miss for
    // each non-matching pair and systematically deflating the precision.
    val realProductSets = realRelatedProducts.select($"productId", explode($"recs").as("rec"))
      .select($"productId", $"rec.productId".as("realProductId"))
      .groupBy("productId")
      .agg(collect_set($"realProductId").as("realProducts"))

    // Left outer join: products absent from the ground truth count as misses, not errors.
    // array_contains yields null for a null array; coalesce maps that case to a miss.
    val joined = expandedRecs.join(realProductSets, Seq("productId"), "left_outer")
      .withColumn("isHit", coalesce(array_contains($"realProducts", $"recommendedProductId"), lit(false)))

    // Precision: fraction of recommended pairs confirmed by the ground truth.
    val precision = joined.agg(avg(when($"isHit", 1).otherwise(0))).as[Double].first()

    // Average similarity score across all recommendations.
    val avgSimilarity = expandedRecs.agg(avg($"similarity")).as[Double].first()

    (precision, avgSimilarity)
  }
}