import breeze.numerics.sqrt
import org.apache.spark.mllib.recommendation.{ALS, MatrixFactorizationModel, Rating}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

/** Tunes ALS hyper-parameters (rank, lambda) for the movie recommender by
  * grid search over a random 80/20 train/test split of the Mongo `rating`
  * collection, printing the (rank, lambda, rmse) triple with the lowest RMSE.
  *
  * NOTE: converted from `extends App` to an explicit `main` — with `App`,
  * every statement runs inside delayed object initialization, which is a known
  * Scala pitfall and interacts badly with Spark closure serialization. The
  * JVM entry point `ALSTrainer.main` is unchanged.
  */
object ALSTrainer {

  def main(args: Array[String]): Unit = {
    // 1. Environment initialisation.
    val spark: SparkSession = Constant.initEnv()
    // 2. Implicit conversions, required for the .as[MovieRating] decoder below.
    import spark.implicits._

    // 3. Load ratings from MongoDB: user id [uid], item id [mid], preference [score].
    val ratingRDD: RDD[Rating] = spark.read
      .option("uri", Constant.MONGO_URL)
      .option("collection", Constant.RATING_COLLECTION)
      .format("com.mongodb.spark.sql")
      .load()
      .as[MovieRating] // uid, mid, score, timestamp
      .rdd
      .map(rating => Rating(rating.uid, rating.mid, rating.score))
      .cache() // reused across every ALS.train call in the grid search

    // Random 80/20 train/test split (unseeded, so results vary run to run).
    val Array(trainingRDD, testRDD) = ratingRDD.randomSplit(Array(0.8, 0.2))

    adjustALSParam(trainingRDD, testRDD)

    ratingRDD.unpersist() // release cached blocks before shutting down
    spark.close()
  }

  /** Root-mean-square error of `model`'s predictions against `testRDD`.
    *
    * NOTE: the inner join silently drops test pairs the model cannot score
    * (cold-start users/items absent from the training split).
    */
  def getRMSE(model: MatrixFactorizationModel, testRDD: RDD[Rating]): Double = {
    // (uid, mid) pairs to score.
    val userProducts = testRDD.map(item => (item.user, item.product))
    val predictRating = model.predict(userProducts)

    // Key both actual and predicted ratings by (user, product) so they join.
    val actual    = testRDD.map(item => ((item.user, item.product), item.rating))
    val predicted = predictRating.map(item => ((item.user, item.product), item.rating))

    // RMSE = sqrt( mean( (actual - predicted)^2 ) )
    sqrt(
      actual.join(predicted).map {
        case (_, (observed, estimated)) =>
          val d = observed - estimated
          d * d
      }.mean()
    )
  }

  /** Grid-searches rank x lambda (5 ALS iterations each) and prints the
    * combination with the smallest RMSE on `testRDD`.
    */
  def adjustALSParam(trainingRDD: RDD[Rating], testRDD: RDD[Rating]): Unit = {
    val result = for {
      rank   <- Array(50, 60, 70)
      lambda <- Array(0.1, 0.01, 1)
    } yield {
      val model = ALS.train(trainingRDD, rank, 5, lambda)
      (rank, lambda, getRMSE(model, testRDD))
    }
    println(result.minBy(_._3))
  }
}


