package com.spark.ml


import org.apache.spark.mllib.recommendation.{ALS, MatrixFactorizationModel, Rating}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Created by TRS on 2017/6/26.
  */
object userCF {

  /**
    * Entry point: trains an ALS collaborative-filtering model on a
    * tab-separated ratings file (userId \t movieId \t score \t timestamp)
    * and prints the MSE on a 60/40 train/test split.
    *
    * @param args optional: args(0) overrides the ratings file path
    *             (defaults to the original hard-coded location).
    */
  def main(args: Array[String]): Unit = {

    val config = new SparkConf().setMaster("local").setAppName("userCf")
    val sc = new SparkContext(config)

    try {
      // Allow the input path to be supplied on the command line;
      // keep the original default for backward compatibility.
      val path = if (args.nonEmpty) args(0) else "E:\\file\\res\\movie.txt"

      // User ratings of movies: (userID, movieId, score, timestamp).
      // The timestamp column (index 3) is present in the file but unused.
      val userMovie = sc.textFile(path).map { line =>
        val fields = line.split("\t")
        Rating(fields(0).toInt, fields(1).toInt, fields(2).toDouble)
      }
      userMovie.foreach(x => println(x)) // debug: dump parsed ratings

      // Split the data: 60% train / 40% test, fixed seed for reproducibility.
      val Array(train, test) = userMovie.randomSplit(Array[Double](0.6, 0.4), 11L)

      // Train the ALS model.
      val rank = 10          // number of latent factors
      val numIterations = 20 // ALS iterations
      val lambda = 0.01      // regularization parameter
      val model = ALS.train(train, rank, numIterations, lambda)

      val trainMse = getMSE(train, model)
      println("train mse=" + trainMse)

      val testMse = getMSE(test, model)
      println("test mse=" + testMse)
    } finally {
      // Always release Spark resources, even if training fails.
      sc.stop()
    }
  }

  /**
    * Computes the mean squared error of the model's predictions against
    * the actual ratings.
    *
    * @param ratings actual (user, product, rating) triples
    * @param model   trained matrix-factorization model
    * @return mean of (actual - predicted)^2 over all rated (user, product) pairs
    */
  def getMSE(ratings: RDD[Rating], model: MatrixFactorizationModel): Double = {
    val usersProducts = ratings.map(r => (r.user, r.product))

    // Key both actuals and predictions by (user, product) so they can be joined.
    val predictions = model.predict(usersProducts).map(r => ((r.user, r.product), r.rating))
    val actuals = ratings.map(r => ((r.user, r.product), r.rating))

    val joined = actuals.join(predictions)

    joined.foreach(x => println(x)) // debug: dump (actual, predicted) pairs

    val squaredErrors = joined.mapValues { case (actual, predicted) =>
      val err = actual - predicted
      err * err
    }

    squaredErrors.values.mean()
  }

}
