package cn.edu.recommender

import breeze.numerics.sqrt
import cn.edu.recommender.OfflineRecommender.MONGODB_RATING_COLLECTION
import org.apache.spark.mllib.recommendation.{ALS, MatrixFactorizationModel, Rating}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

import java.lang.Thread.sleep

object ALSTrainer {

  /**
   * Computes the root-mean-square error of a trained ALS model on a test set.
   *
   * @param model    trained matrix-factorization model
   * @param testData held-out ratings to evaluate against
   * @return RMSE over the (user, product) pairs present in both the test set
   *         and the model's predictions (inner join)
   */
  def getRMSE(model: MatrixFactorizationModel, testData: RDD[Rating]): Double = {
    val userProducts = testData.map(item => (item.user, item.product))
    val predictions: RDD[Rating] = model.predict(userProducts)

    // Key both RDDs by (user, product) so observed and predicted scores can be joined.
    val observed = testData.map(rating => ((rating.user, rating.product), rating.rating))
    val predicted = predictions.map(rating => ((rating.user, rating.product), rating.rating))

    // RMSE = sqrt(mean((observed - predicted)^2))
    sqrt(
      observed.join(predicted)
        .map { case ((_, _), (real, pre)) =>
          // squared error between the real and predicted rating
          val err = real - pre
          err * err
        }
        .mean()
    )
  }

  /**
   * Grid-searches ALS hyper-parameters and prints the results.
   *
   * Trains one model per (rank, lambda) combination with a fixed 5 iterations,
   * then prints the (rank, lambda, rmse) triples sorted by ascending RMSE so
   * the best parameter set appears first.
   *
   * @param trainData ratings used for training
   * @param testData  ratings used for evaluation
   */
  def adjustALSParams(trainData: RDD[Rating], testData: RDD[Rating]): Unit = {
    // Iteration count fixed at 5; rank and lambda are searched over small grids.
    // `1.0` is written explicitly to avoid silent Int -> Double widening.
    val result = for (rank <- Array(100, 200); lambda <- Array(1.0, 0.1, 0.01))
      yield {
        val model = ALS.train(trainData, rank, 5, lambda)
        val rmse = getRMSE(model, testData)
        (rank, lambda, rmse)
      }
    // Sort by RMSE (ascending) so the optimal parameters are printed first.
    println(result.sortBy(_._3).mkString("\n"))
  }

  /**
   * Entry point: loads ratings from MongoDB, splits them into training and
   * test sets (80/20), and runs the ALS parameter grid search.
   */
  def main(args: Array[String]): Unit = {
    val config = Map(
      "spark.cores" -> "local[*]",
      "mongo.uri" -> "mongodb://localhost:27017/recommender",
      "mongo.db" -> "recommender"
    )

    val mongoConfig = MongoConfig(config("mongo.uri"), config("mongo.db"))

    val spark = SparkSession.builder()
      .appName("ALSTrainer")
      .master(config("spark.cores"))
      .getOrCreate()

    import spark.implicits._

    // Load the rating collection from MongoDB and convert to MLlib Rating objects.
    // Cached because ALS.train makes many passes over the data during the grid search.
    val ratingDS = spark
      .read
      .option("uri", mongoConfig.uri)
      .option("collection", MONGODB_RATING_COLLECTION)
      .format("com.mongodb.spark.sql")
      .load()
      .as[MovieRating]
      .map(rating => Rating(rating.uid, rating.mid, rating.score))
      .cache()

    // Randomly split into 80% training / 20% test.
    val Array(trainData, testData) = ratingDS.randomSplit(Array(0.8, 0.2)).map(_.rdd)

    // Search for the best (rank, lambda) combination and print the ranking.
    adjustALSParams(trainData, testData)

    // Release the cached ratings before shutting down.
    ratingDS.unpersist()

    // NOTE(review): the original slept 20 minutes here (Thread.sleep(1200000)),
    // presumably to keep the Spark UI inspectable after the job finished.
    // Removed so the job terminates once the search completes; restore locally
    // if UI inspection is needed.
    spark.stop()
  }
}
