package com.oscar.Offline

import org.apache.spark.SparkConf
import org.apache.spark.ml.evaluation.RegressionEvaluator
import org.apache.spark.sql.{Dataset, SparkSession}


object ALSTrainer {
  // Name of the MySQL table holding the ratings data.
  val MYSQL_RATING = "Rating"
  // Candidate hyper-parameter values for the grid search.
  val RANKARRAY = Array(10.0, 25.0, 50.0)        // rank: number of latent factors
  val ITERATIONSARRAY = Array(5.0, 10.0, 20.0)   // maxIter: number of ALS iterations
  val LAMBDARANKARRAY = Array(0.1, 0.01, 0.001)  // regParam: ALS regularization strength
  val PARAMETERARRAY = Array(RANKARRAY, ITERATIONSARRAY, LAMBDARANKARRAY)

  def main(args: Array[String]): Unit = {
    // Connection / runtime configuration.
    val config = Map(
      "spark.cores" -> "local[*]",
      "mysql.url" -> "jdbc:mysql://localhost:3306/recommender",
      "mysql.user" -> "root",
      "mysql.password" -> "123456"
    )
    // Build the Spark config and session.
    val sparkConf = new SparkConf().setMaster(config("spark.cores")).setAppName("StatisticsRecommender")
    val spark = SparkSession.builder().config(sparkConf).getOrCreate()
    import spark.implicits._

    // Implicit MySQL configuration; config(key) is safe here because every
    // key is defined in the literal Map above (no Option.get needed).
    implicit val mysqlConfig: MySQLConfig =
      MySQLConfig(config("mysql.url"), config("mysql.user"), config("mysql.password"))

    // Load the ratings table from MySQL as a typed Dataset.
    val ratingDS = spark.read
      .format("jdbc")
      .option("url", mysqlConfig.url)
      .option("dbtable", MYSQL_RATING)
      .option("user", mysqlConfig.user)
      .option("password", mysqlConfig.password)
      .load()
      .as[BookRating]

    // Randomly split the data into training and test sets; a fixed seed
    // makes successive runs of the grid search comparable.
    val Array(trainData, testData) = ratingDS.randomSplit(Array(0.8, 0.2), seed = 42L)

    // Grid-search the ALS hyper-parameters and report the best combination.
    // rank = number of latent factors, iterations = maxIter, lambda = regParam.
    val (rank, iterations, lambda, model, rmse) = adjustALSParams(trainData, testData, PARAMETERARRAY)
    println(s"Best ALS parameters => rank=${rank.toInt}, maxIter=${iterations.toInt}, regParam=$lambda, rmse=$rmse")

    // Shut down Spark.
    spark.close()
  }

  /**
   * Trains an ALS model for every combination of the supplied hyper-parameter
   * values and returns the combination that achieves the lowest RMSE on the
   * test set.
   *
   * @param trainData training ratings
   * @param testData  held-out ratings used for evaluation
   * @param array     array(0) = rank candidates, array(1) = maxIter candidates,
   *                  array(2) = regParam candidates
   * @return (rank, maxIter, regParam, fitted model, rmse) of the best run
   */
  def adjustALSParams(trainData: Dataset[BookRating], testData: Dataset[BookRating], array: Array[Array[Double]]) = {
    val result = for (rank <- array(0); iterations <- array(1); lambda <- array(2))
      yield {
        // Train an ALS latent-factor model for this parameter combination.
        val model = new org.apache.spark.ml.recommendation.ALS()
          .setRank(rank.toInt).setMaxIter(iterations.toInt).setRegParam(lambda)
          .setUserCol("userId").setItemCol("bookId").setRatingCol("score")
          // Drop predictions for users/items seen only in the test split;
          // without this, transform() emits NaN predictions and the RMSE
          // itself becomes NaN, breaking the "pick the best run" step below.
          .setColdStartStrategy("drop")
          .fit(trainData.toDF())
        val predictions = model.transform(testData)
        // "rmse" is the evaluator's default metric, but stating it (and the
        // prediction column) explicitly documents the intent.
        val rmse: Double = new RegressionEvaluator()
          .setMetricName("rmse")
          .setLabelCol("score")
          .setPredictionCol("prediction")
          .evaluate(predictions)
        (rank, iterations, lambda, model, rmse)
      }
    // Guard against NaN scores (e.g. an empty prediction set) so that the
    // minimum is meaningful, and fail loudly if no run produced a valid RMSE.
    val valid = result.filter(r => !r._5.isNaN)
    require(valid.nonEmpty, "Every ALS parameter combination produced a NaN RMSE")
    valid.minBy(_._5)
  }

}
