package com.lpssfxy.offline

import com.lpssfxy.offline.entities.{ ProductRecs, Recommendation}
import com.lpssfxy.offline.utils.AppUtils
import org.apache.spark.sql
import org.apache.spark.sql.SparkSession
import org.jblas.DoubleMatrix
import org.apache.spark.ml.tuning.{ParamGridBuilder, TrainValidationSplit}
import org.apache.spark.ml.recommendation.ALS
import org.apache.spark.ml.evaluation.RegressionEvaluator

object CalculateProductSimRecList {

  /** Minimum cosine similarity for another product to appear in a recommendation list. */
  private val SimilarityThreshold = 0.6

  /** Maximum number of similar products kept per product. */
  private val MaxRecommendations = 30

  /** Ratings below this value are treated as low-quality and filtered out. */
  private val MinRating = 1.0

  /**
   * Entry point: loads rating data, trains an ALS model (with parameter tuning),
   * derives product-to-product similarity recommendations from the item factors,
   * and persists the result to MongoDB.
   */
  def main(args: Array[String]): Unit = {
    // Record start time for the elapsed-time report at the end.
    val startTime = System.currentTimeMillis()

    val spark = AppUtils.createSparkSession("CalculateProductSimRecList", AppUtils.getSparkCores)

    // Load ratings and drop low-quality entries (rating below MinRating).
    val ratingRDD = AppUtils.loadRatingData(spark)
    val filteredRatingRDD = ratingRDD.filter { case (_, _, rating) => rating >= MinRating }

    // Compute recommendations and write them to MongoDB.
    val productRecsDF = calculateProductRecommendations(spark, filteredRatingRDD)
    AppUtils.saveRecommendationsToMongoDB(productRecsDF, AppUtils.MONGODB_PRODUCT_RECS_COLLECTION)

    spark.stop()

    val elapsedTime = System.currentTimeMillis() - startTime
    println(s"程序执行耗时: ${elapsedTime} 毫秒")
  }

  /**
   * Cosine similarity between two product feature vectors.
   *
   * Guards against a zero-norm vector: the original `dot / (norm * norm)` form
   * yields NaN (0/0) when either factor vector is all zeros; we return 0.0
   * (no similarity) instead so downstream sorting/filtering stays well-defined.
   *
   * @param product1 feature vector of the first product
   * @param product2 feature vector of the second product
   * @return cosine similarity in [-1, 1], or 0.0 if either vector has zero norm
   */
  private def cosSim(product1: DoubleMatrix, product2: DoubleMatrix): Double = {
    val denominator = product1.norm2() * product2.norm2()
    if (denominator == 0.0) 0.0 else product1.dot(product2) / denominator
  }

  /**
   * Trains an ALS model with train/validation parameter tuning, then computes,
   * for every product, the top-N most similar products by cosine similarity of
   * the learned item factors.
   *
   * @param spark     active SparkSession
   * @param ratingRDD ratings as (userId, productId, rating) triples
   * @return DataFrame of ProductRecs rows (productId, recommendation list)
   */
  private def calculateProductRecommendations(spark: SparkSession, ratingRDD: org.apache.spark.rdd.RDD[(Int, Int, Double)]): sql.DataFrame = {
    import spark.implicits._
    val ratingDF = ratingRDD.toDF("userId", "productId", "rating")

    // ALS estimator; "drop" avoids NaN predictions for cold-start rows during evaluation.
    val als = new ALS()
      .setUserCol("userId")
      .setItemCol("productId")
      .setRatingCol("rating")
      .setColdStartStrategy("drop")

    // Parameter grid for tuning rank, iteration count and regularization.
    val paramGrid = new ParamGridBuilder()
      .addGrid(als.rank, Array(5, 10))
      .addGrid(als.maxIter, Array(10, 15))
      .addGrid(als.regParam, Array(0.01, 0.1))
      .build()

    // Single train/validation split (80/20), selecting by RMSE (made explicit here).
    val trainValidationSplit = new TrainValidationSplit()
      .setEstimator(als)
      .setEvaluator(new RegressionEvaluator().setLabelCol("rating").setMetricName("rmse"))
      .setEstimatorParamMaps(paramGrid)
      .setTrainRatio(0.8)

    // Pattern match instead of asInstanceOf: fails with a clear message if the
    // best model is ever not an ALSModel.
    val model = trainValidationSplit.fit(ratingDF).bestModel match {
      case m: org.apache.spark.ml.recommendation.ALSModel => m
      case other =>
        throw new IllegalStateException(s"Expected ALSModel but got ${other.getClass.getName}")
    }

    // Item factors come back as (id, Array[Float]); convert to jblas DoubleMatrix.
    val productFeatures = model.itemFactors.rdd.map { row =>
      val productId = row.getAs[Int]("id")
      val floatFeatures = row.getAs[scala.collection.mutable.WrappedArray[Float]]("features")
      val doubleFeatures = floatFeatures.map(_.toDouble).toArray
      (productId, new DoubleMatrix(doubleFeatures))
    }

    // Broadcast all factors so each partition can compare against every product.
    // NOTE: do not destroy the broadcast here — the RDD below is lazy and only
    // evaluated when the caller triggers an action.
    val broadcastProductFeatures = spark.sparkContext.broadcast(productFeatures.collect())

    // For each product, keep the MaxRecommendations most similar other products
    // whose cosine similarity exceeds SimilarityThreshold.
    val productRecs = productFeatures.map { case (productId, features) =>
      val simScores = broadcastProductFeatures.value
        .filter(_._1 != productId)
        .map { case (otherProductId, otherFeatures) =>
          (otherProductId, cosSim(features, otherFeatures))
        }
        .filter(_._2 > SimilarityThreshold)
        .sortBy(-_._2)
        .take(MaxRecommendations)
      ProductRecs(productId, simScores.map(x => Recommendation(x._1, x._2)))
    }

    productRecs.toDF()
  }
}