package com.sdg.offline

import breeze.numerics.sqrt
import org.apache.spark.SparkConf
import org.apache.spark.mllib.recommendation.{ALS, MatrixFactorizationModel, Rating}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

object AlsTrainer {
  /** Maximum number of recommendations to produce per user. */
  val MAX_RECOMMENDATIONS: Int = 10

  /** MongoDB collection holding the raw rating documents. */
  val MONGODB_RATING_COLLECTION = "Rating"

  /**
    * Computes the root-mean-square error (RMSE) of a trained ALS model
    * against a set of observed ratings.
    *
    * @param model       trained matrix-factorization model
    * @param realRatings observed (user, product, rating) triples to score against
    * @return RMSE between the model's predictions and the observed ratings
    */
  def computeRmse(model: MatrixFactorizationModel, realRatings: RDD[Rating]): Double = {
    // Strip the rating: the model only needs (user, product) pairs to predict.
    val userProducts = realRatings.map { case Rating(user, product, _) =>
      (user, product)
    }

    // Predicted ratings, keyed by (user, product) for the join below.
    val predictions = model.predict(userProducts).map { case Rating(user, product, rate) =>
      ((user, product), rate)
    }

    // Pair each observed rating with its prediction on (user, product).
    val observedAndPredicted = realRatings.map { case Rating(user, product, rate) =>
      ((user, product), rate)
    }.join(predictions)

    // RMSE = sqrt(mean(squared error)); math.sqrt suffices for a scalar Double.
    math.sqrt(observedAndPredicted.map { case (_, (observed, predicted)) =>
      val err = observed - predicted
      err * err
    }.mean())
  }

  /**
    * Grid-searches ALS hyper-parameters and returns the combination with the
    * lowest RMSE.
    *
    * NOTE(review): the model is scored on its own training data, so the chosen
    * parameters are biased toward overfitting; a held-out validation split
    * would give an honest estimate.
    *
    * @param trainData ratings used for both training and evaluation
    * @return (rank, lambda, alpha) of the best-scoring model; alpha does not
    *         affect explicit-feedback `ALS.train` and is reported as 1.0
    */
  def parameterAdjust(trainData: RDD[Rating]): (Int, Double, Double) = {
    // alpha only matters for ALS.trainImplicit. The original grid looped over
    // alpha as well, training every (rank, lambda) pair twice for nothing and
    // (via the stable sort) always selecting alpha = 1.0 anyway — so the
    // spurious dimension is dropped and alpha is fixed at 1.0.
    val alpha = 1.0
    val evaluations = for (rank <- Array(10, 50); lambda <- Array(1.0, 0.0001)) yield {
      val model = ALS.train(trainData, rank, 5, lambda)
      // Keep the RMSE so the best parameter set can be selected below.
      val rmse: Double = computeRmse(model, trainData)
      ((rank, lambda, alpha), rmse)
    }
    val ((bestRank, bestLambda, bestAlpha), bestRmse) = evaluations.minBy(_._2)
    println("After parameter adjust, the best rmse = " + bestRmse)
    (bestRank, bestLambda, bestAlpha)
  }

  /**
    * Entry point: loads ratings from MongoDB, runs the hyper-parameter grid
    * search, and prints the best (rank, lambda, alpha) triple.
    */
  def main(args: Array[String]): Unit = {
    // Mongo connection settings (typed vals instead of a Map[String, Any]
    // that had to be asInstanceOf-cast back out; the map's other entries
    // were never read).
    val mongoUri = "mongodb://vm2:27017/recom3"
    val mongoDb = "recom3"
    implicit val mongoConf: MongoConfig = new MongoConfig(mongoUri, mongoDb)

    val sparkConf = new SparkConf().setAppName("RecommenderTest").setMaster("local[2]")
    // Resource tuning, e.g. .set("spark.executor.memory", "6G"), goes here if needed.
    val spark = SparkSession.builder().config(sparkConf).getOrCreate()
    import spark.implicits._

    // Load rating documents [uid, mid, score, timestamp] and keep only the
    // (user, product, rating) triple that ALS needs.
    val ratingRDD = spark
      .read
      .option("uri", mongoConf.uri)
      .option("collection", MONGODB_RATING_COLLECTION)
      .format("com.mongodb.spark.sql")
      .load()
      .as[MoviesRating]
      .rdd
      .map(rating => Rating(rating.uid, rating.mid, rating.score))
      // ALS iterates over the ratings many times; caching avoids re-reading
      // Mongo on every pass (the author observed OOM problems without it).
      .cache()

    // Automatically search for the parameter set with the smallest RMSE.
    val tuple: (Int, Double, Double) = parameterAdjust(ratingRDD)
    println(tuple)
    // Release Spark resources.
    spark.stop()
  }

}
