package com.xxh.user.rec

import com.mongodb.spark.MongoSpark
import com.mongodb.spark.config.ReadConfig
import org.apache.spark.SparkConf
import org.apache.spark.mllib.recommendation.{ALS, MatrixFactorizationModel, Rating}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import breeze.optimize.linear._

// Result row of a rank/lambda grid search (rank, regularization, resulting RMSE).
// NOTE(review): `lambad` is a typo for `lambda`; kept as-is because renaming a
// case-class field changes the public interface (copy/named-arg/unapply callers).
// Currently unused within this file — confirm external usage before fixing.
case class ParamResult(rank: Int, lambad: Double, rmse: Double)

object ALSTrainer {

  /** Default Mongo input URI. The collection segment ("movie") is only a
    * default — each read overrides the collection via [[readDataFrameFromMongo]]. */
  val MONGODB_URI = "mongodb://localhost:27017/movierec.movie"

  /** Entry point: loads ratings from Mongo, splits them into train/test sets,
    * and evaluates ALS models over a range of iteration counts. */
  def main(args: Array[String]): Unit = {
    // Spark configuration: local mode + Mongo connector input URI.
    val sparkConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("OfflineRecommend")
      .set("spark.mongodb.input.uri", MONGODB_URI)

    val spark = SparkSession.builder().config(sparkConf).getOrCreate()
    import spark.implicits._

    // The ratings are re-read by every ALS training run below, so cache them.
    val ratingDf = readDataFrameFromMongo("rating", spark).cache()

    val data: RDD[Rating] = ratingDf.map { row =>
      // NOTE(review): getAs[Int]("score") assumes the score is stored as Int32
      // in Mongo; it throws ClassCastException if scores are Double — confirm schema.
      Rating(row.getAs[Int]("uid"), row.getAs[Int]("mid"), row.getAs[Int]("score"))
    }.rdd

    // 20% test / 80% train split; the fixed seed keeps the split reproducible.
    val Array(testRDD, trainRDD) = data.randomSplit(Array(0.2, 0.8), seed = 233)

    // Grid search over rank/lambda (disabled; enable to re-tune):
    // adjustParam(testRDD, trainRDD)

    // Evaluate how the number of ALS iterations affects the error.
    testIterParams(testRDD, trainRDD)

    // Release the cached ratings before shutting down.
    ratingDf.unpersist()
    spark.stop()
  }

  /** Computes the root-mean-square error of `model`'s predictions against the
    * observed ratings in `test`.
    *
    * Pairs the model cannot score (users/products unseen at training time) are
    * dropped by the inner join and do not contribute to the error.
    *
    * @param test  held-out ratings to evaluate against
    * @param model trained matrix-factorization model
    * @return RMSE over all (user, product) pairs present in both RDDs
    */
  def calRMSE(test: RDD[Rating], model: MatrixFactorizationModel): Double = {
    // Observed ratings keyed by (uid, mid).
    val observed: RDD[((Int, Int), Double)] =
      test.map(r => ((r.user, r.product), r.rating))

    // Model predictions for the same (uid, mid) pairs.
    val predicted: RDD[((Int, Int), Double)] =
      model
        .predict(test.map(r => (r.user, r.product)))
        .map(r => ((r.user, r.product), r.rating))

    // BUGFIX: the previous version returned the mean of squared errors (MSE);
    // RMSE requires the final square root.
    math.sqrt(
      observed.join(predicted).map { case (_, (real, pred)) =>
        val err = real - pred
        err * err
      }.mean()
    )
  }

  /** Grid-searches ALS over rank x lambda (5 iterations each), printing the
    * RMSE of every combination and the best one found (lowest RMSE). */
  def adjustParam(testRdd: RDD[Rating], trainRdd: RDD[Rating]): Unit = {
    val results = for {
      rank   <- Array(10, 20, 50)
      lambda <- Array(0.001, 0.01, 0.1)
    } yield {
      val model = ALS.train(trainRdd, rank, 5, lambda)
      (rank, lambda, calRMSE(testRdd, model))
    }

    // BUGFIX: previously println(a, b, c) auto-tupled the three strings and the
    // "rmse" label was missing its colon; print one formatted line instead.
    results.foreach { case (rank, lambda, rmse) =>
      println(s"rank:$rank, lambda:$lambda, rmse:$rmse")
    }

    println("the best param combination :" + results.minBy(_._3))
  }

  /** Trains ALS (rank = 10, lambda = 0.1) with increasing iteration counts and
    * prints the RMSE for each, plus the best-performing count (lowest RMSE). */
  def testIterParams(testRdd: RDD[Rating], trainRdd: RDD[Rating]): Unit = {
    val results = for (iters <- Array(10, 15, 20, 25, 30, 35)) yield {
      val model = ALS.train(trainRdd, 10, iters, 0.1)
      (iters, calRMSE(testRdd, model))
    }

    // BUGFIX: previously println(a, b) auto-tupled the two strings; print one
    // formatted line instead.
    results.foreach { case (iters, rmse) =>
      println(s"iters:$iters, rmse:$rmse")
    }

    println("the best param combination :" + results.minBy(_._2))
  }

  /** Overwrites collection `collectionName` in the `movierec` database with `df`.
    * (Name kept as `SaveToDB` for source compatibility with existing callers;
    * Scala convention would be lowerCamelCase.) */
  def SaveToDB(df: DataFrame, collectionName: String): Unit =
    df.write
      .option("uri", "mongodb://localhost:27017/movierec")
      .option("collection", collectionName)
      .format("com.mongodb.spark.sql")
      .mode("overwrite")
      .save()

  /** Loads collection `collectionName` as a DataFrame, reusing the session's
    * Mongo read configuration (URI) with only the collection overridden. */
  def readDataFrameFromMongo(collectionName: String, sc: SparkSession): DataFrame = {
    val readConfig = ReadConfig(Map("collection" -> collectionName), Some(ReadConfig(sc)))
    MongoSpark.load(sc, readConfig)
  }
}
