package com.ocean.offlinelfmrecommend

import breeze.numerics.sqrt
import com.ocean.offlinelfmrecommend.OfflineLFMRecommend.MySql_RATING_Table
import org.apache.spark.SparkConf
import org.apache.spark.mllib.recommendation.{ALS, MatrixFactorizationModel, Rating}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

object ALSTrainer {

  /**
   * Entry point: loads movie ratings from MySQL, splits them into
   * training/test sets (80/20), and grid-searches ALS hyper-parameters.
   */
  def main(args: Array[String]): Unit = {
    val config = Map(
      // NOTE(review): credentials are hard-coded in source; move them to an
      // external config file or environment variables before production use.
      "spark.cores" -> "local[*]",
      "mysql.uri" -> "jdbc:mysql://192.168.10.105:3306/recommend?useUnicode=true&characterEncoding=utf8&rewriteBatchedStatements=true&useSSL=false",
      "mysql.user" -> "root",
      "mysql.password" -> "cde32wsxzaq1"
    )

    // Build the SparkConf and SparkSession.
    val sparkConf: SparkConf = new SparkConf()
      .setMaster(config("spark.cores"))
      .setAppName("OfflineLFMRecommend")

    val spark: SparkSession = SparkSession.builder().config(sparkConf).getOrCreate()

    import spark.implicits._

    // Explicit type annotation: required for implicits in Scala 3 and avoids
    // the "implicit definition should have explicit type" warning in 2.13.
    implicit val mysqlConfig: MysqlConfig =
      MysqlConfig(config("mysql.uri"), config("mysql.user"), config("mysql.password"))

    // Load ratings from MySQL, drop the timestamp, and cache: this RDD is
    // reused for every (rank, lambda) combination of the grid search.
    val ratingRDD: RDD[Rating] = spark.read.format("jdbc")
      .option("url", mysqlConfig.uri)
      .option("user", mysqlConfig.user)
      .option("password", mysqlConfig.password)
      .option("dbtable", MySql_RATING_Table)
      .load()
      .as[MovieRating]
      .rdd
      .map(rating => Rating(rating.uid, rating.mid, rating.score)) // drop the timestamp
      .cache()

    // 80/20 random split into training and test sets.
    val Array(trainingRDD, testRDD) = ratingRDD.randomSplit(Array(0.8, 0.2))

    trainALSParams(trainingRDD, testRDD)

    // Release the cached RDD before shutting down.
    ratingRDD.unpersist()
    spark.stop()
  }

  /**
   * Grid-searches ALS over rank/lambda combinations and prints the
   * (rank, lambda, rmse) triple with the lowest RMSE on the test set.
   *
   * @param trainingRDD ratings used to fit each candidate model
   * @param testRDD     held-out ratings used to score each candidate model
   * @param iterations  number of ALS iterations per model (previously
   *                    hard-coded to 5; the default preserves old behavior)
   */
  def trainALSParams(trainingRDD: RDD[Rating],
                     testRDD: RDD[Rating],
                     iterations: Int = 5): Unit = {
    val result = for {
      rank   <- Array(50, 100, 200, 300)
      lambda <- Array(0.01, 0.1, 1.0)
    } yield {
      val model: MatrixFactorizationModel = ALS.train(trainingRDD, rank, iterations, lambda)
      val rmse: Double = getRMSE(model, testRDD)
      (rank, lambda, rmse)
    }
    // Best result observed so far: (100, 0.1, 0.880330326405934)
    println(result.minBy(_._3))
  }

  /**
   * Computes the root-mean-square error of the model's predictions against
   * the observed ratings in the test set.
   *
   * Note: `join` is an inner join, so (user, product) pairs the model cannot
   * predict (e.g. cold-start users/items) are silently excluded from the
   * error computation.
   *
   * @param model   a trained matrix-factorization model
   * @param testRDD held-out observed ratings
   * @return the RMSE over the joined (observed, predicted) pairs
   */
  def getRMSE(model: MatrixFactorizationModel, testRDD: RDD[Rating]): Double = {
    // Predict a score for every (user, product) pair present in the test set.
    val userProducts: RDD[(Int, Int)] = testRDD.map(item => (item.user, item.product))
    val predictRating: RDD[Rating] = model.predict(userProducts)

    // Key both sides by (user, product) so they can be joined.
    val observed: RDD[((Int, Int), Double)] =
      testRDD.map(item => ((item.user, item.product), item.rating))
    val predict: RDD[((Int, Int), Double)] =
      predictRating.map(item => ((item.user, item.product), item.rating))

    sqrt(
      observed.join(predict).map {
        case (_, (actual, pred)) => // key is unused; match it with _
          val error: Double = actual - pred
          error * error
      }.mean()
    )
  }

}
