package SparkMLlib.SparkMLlibItemAls
import org.apache.log4j.Logger
import org.apache.log4j.Level
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.mllib.recommendation.ALS
import org.apache.spark.mllib.recommendation.Rating
import org.apache.spark.mllib.recommendation.MatrixFactorizationModel
import scala.io.Source
import org.apache.spark.rdd.RDD


object BaedItermCFDemo01 {

  /** Default ratings file, used when no path is given on the command line. */
  private val DefaultRatingsPath =
    "D://GitProjects//SparkKafkaHadoopZookeeperHBaseHiveRedis//spark//src//main//resource//MLlibMovieAlsModel.txt"

  /**
   * Entry point: loads "user,product,rating" triples from a text file, then
   * grid-searches ALS hyper-parameters (rank, iterations, lambda) and reports
   * the combination with the lowest RMSE on the training data.
   *
   * Rough guidance for the ALS parameters:
   *  - rank: number of latent factors, typically 10-200; larger = more
   *    accurate but more expensive.
   *  - iterations: typically 10-20; more iterations = more accurate but
   *    more expensive.
   *  - lambda: ALS regularization parameter; 0.01 is a common default.
   *
   * @param args optional first element overrides the ratings file path
   */
  def main(args: Array[String]): Unit = {
    // Silence noisy framework logging so only application output remains.
    Logger.getLogger("org.apache.spark").setLevel(Level.ERROR)
    Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF)

    val conf = new SparkConf().setAppName("BaedItermCFDemo01").setMaster("local")
    val sc = new SparkContext(conf)
    try {
      val seq = getSeqRDD(args.headOption.getOrElse(DefaultRatingsPath))
      // getSeqRDD already warned if the file held no usable data; only
      // proceed when there is something to train on (the original passed
      // null to parallelize here and crashed).
      if (seq.nonEmpty) {
        val ratings = sc.parallelize(seq, 1)
        // Basic dataset statistics: distinct products, distinct users,
        // total number of ratings.
        val pNum = ratings.map(_.product).distinct().count()
        val uNum = ratings.map(_.user).distinct().count()
        val rNum = ratings.count()
        println("商品个数：" + pNum + "，用户个数：" + uNum + "，评分个数：" + rNum)

        // Small demo grid of candidate hyper-parameters.
        val ranks = List(5, 15)
        val iterations = List(2, 5)
        val lambdas = List(0.1, 1.0)

        // Train a model for every combination and keep the one with the
        // lowest RMSE (expression-oriented replacement for the mutable
        // best-so-far variables).
        val results = for {
          rank <- ranks
          iteration <- iterations
          lambda <- lambdas
        } yield {
          val model = ALS.train(ratings, rank, iteration, lambda)
          (computeRMSE(model, ratings, rNum), model, rank, iteration, lambda)
        }
        val (bestRMSE, bestModel, bestRank, bestIteration, bestLambda) =
          results.minBy(_._1)

        println("最佳模型：" + bestModel)
        println("最佳的RMSE：" + bestRMSE)
        // bestRank was tracked but never reported in the original.
        println("最佳的Rank：" + bestRank)
        println("最佳的Lambda：" + bestLambda)
        println("最佳的迭代次数Iteration：" + bestIteration)
      }
    } finally {
      // Always release Spark resources, even if loading or training fails.
      sc.stop()
    }
  }

  /**
   * Reads "user,product,rating" CSV lines from `path` into a sequence of
   * [[Rating]]s, keeping only strictly positive ratings.
   *
   * Lines that do not split into exactly three fields are skipped instead of
   * throwing a MatchError. Returns an empty sequence (after printing a
   * warning) when no usable data is found — the original returned null here,
   * which crashed the caller.
   *
   * @param path path of the ratings file
   * @return the parsed ratings, possibly empty
   */
  def getSeqRDD(path: String): Seq[Rating] = {
    val source = Source.fromFile(path)
    try {
      // Materialize eagerly (toList) so the file can be closed safely —
      // getLines() is lazy and must not outlive the source.
      val data = source.getLines()
        .map(_.split(","))
        .collect { case Array(user, product, rat) =>
          Rating(user.toInt, product.toInt, rat.toDouble)
        }
        .filter(_.rating > 0.0)
        .toList
      if (data.isEmpty) {
        println("数据错误")
      }
      data
    } finally {
      // The original leaked this file handle.
      source.close()
    }
  }

  /**
   * Root-mean-square error of `model`'s predictions against the actual
   * ratings in `data`.
   *
   * Conceptually:
   * {{{
   *   select predicted, actual
   *   from model, data
   *   where model.(user, product) = data.(user, product)
   * }}}
   *
   * @param model trained factorization model
   * @param data  actual ratings
   * @param n     total number of ratings (divisor for the mean)
   * @return the RMSE, or Double.NaN when n is not positive
   */
  def computeRMSE(model: MatrixFactorizationModel, data: RDD[Rating], n: Long): Double = {
    if (n <= 0) {
      Double.NaN
    } else {
      // Predicted rating for every (user, product) pair present in the data.
      val predictions = model.predict(data.map(r => (r.user, r.product)))
      val predicted = predictions.map(r => ((r.user, r.product), r.rating))
      val actual = data.map(r => ((r.user, r.product), r.rating))
      // Join on (user, product) so each element pairs predicted with actual.
      val predictAndReal = predicted.join(actual).values
      // sum() is safe on an empty RDD, unlike the original reduce(_ + _).
      math.sqrt(predictAndReal.map { case (p, a) => (p - a) * (p - a) }.sum() / n)
    }
  }

}
