import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.{SQLContext, SparkSession}
import org.apache.spark.ml.evaluation.RegressionEvaluator
import org.apache.spark.ml.recommendation.ALS
object MovieLensALS {

  /** One explicit rating event: user, movie, score, and epoch timestamp. */
  final case class Rating(userId: Int, movieId: Int, rating: Float, timestamp: Long)

  /**
   * Parses one "userId::movieId::rating::timestamp" line into a [[Rating]].
   *
   * @throws IllegalArgumentException if the line has fewer than 4 fields
   * @throws NumberFormatException    if a field is not numeric
   */
  def parseRating(str: String): Rating = {
    val fields = str.split("::")
    require(fields.length >= 4, s"Malformed rating line: $str")
    Rating(fields(0).toInt, fields(1).toInt, fields(2).toFloat, fields(3).toLong)
  }

  /** Movie metadata: id, title, and genre/tag string. */
  final case class MovieRating(movieId: Int, movieName: String, tagName: String)

  /**
   * Parses one "movieId::movieName::tagName" line into a [[MovieRating]].
   *
   * @throws IllegalArgumentException if the line has fewer than 3 fields
   * @throws NumberFormatException    if the id field is not numeric
   */
  def parseMovies(str: String): MovieRating = {
    val fields = str.split("::")
    require(fields.length >= 3, s"Malformed movie line: $str")
    // split() already yields Strings — no .toString needed.
    MovieRating(fields(0).toInt, fields(1), fields(2))
  }

  /**
   * Trains explicit- and implicit-feedback ALS models on MovieLens ratings,
   * saves the implicit model's test predictions to HDFS, and prints the RMSE
   * of both models on the held-out split.
   */
  def main(args: Array[String]): Unit = {
    // Silence verbose Spark/Jetty console logging.
    Logger.getLogger("org.apache.spark").setLevel(Level.ERROR)
    Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF)

    // Local session with 2 cores; reuses an existing session if one is active.
    val spark = SparkSession.builder()
      .appName("MovieLensALS")
      .master("local[2]")
      .getOrCreate()
    import spark.implicits._ // enables RDD -> DataFrame via .toDF()

    try {
      val ratings = spark.sparkContext
        .textFile("hdfs://master:9000/input_spark/ratings.dat")
        .map(parseRating)
        .toDF()
      ratings.show()

      // 80/20 train/test split (unseeded, so results vary run to run).
      val Array(training, test) = ratings.randomSplit(Array(0.8, 0.2))

      // Explicit-feedback ALS: ratings are treated as direct preference scores.
      // (Original comments had explicit/implicit labels swapped — fixed.)
      val alsExplicit = new ALS()
        .setMaxIter(5)
        .setRegParam(0.01)
        .setUserCol("userId")
        .setItemCol("movieId")
        .setRatingCol("rating")
      // Implicit-feedback ALS: ratings are treated as confidence signals.
      val alsImplicit = new ALS()
        .setMaxIter(5)
        .setRegParam(0.01)
        .setImplicitPrefs(true)
        .setUserCol("userId")
        .setItemCol("movieId")
        .setRatingCol("rating")

      val modelExplicit = alsExplicit.fit(training)
      val modelImplicit = alsImplicit.fit(training)

      // Drop rows with NaN predictions (cold-start users/items in the test split),
      // otherwise the RMSE below would be NaN.
      val predictionsExplicit = modelExplicit.transform(test).na.drop()
      val predictionsImplicit = modelImplicit.transform(test).na.drop()
      predictionsExplicit.show()
      predictionsImplicit.show()

      // Persist implicit predictions as a single pipe-delimited CSV part file.
      predictionsImplicit.repartition(1).write
        .format("com.databricks.spark.csv")
        .option("header", false)
        .option("delimiter", "|")
        .save("hdfs://master:9000/input_spark/save.csv")

      // Score both models with root-mean-square error against the true ratings.
      // NOTE(review): RMSE against raw ratings is a questionable metric for the
      // implicit model (it predicts confidence, not the rating scale) — kept for
      // comparability with the original output.
      val evaluator = new RegressionEvaluator()
        .setMetricName("rmse")
        .setLabelCol("rating")
        .setPredictionCol("prediction")
      val rmseExplicit = evaluator.evaluate(predictionsExplicit)
      val rmseImplicit = evaluator.evaluate(predictionsImplicit)
      println(s"Explicit:Root-mean-square error = $rmseExplicit")
      println(s"Implicit:Root-mean-square error = $rmseImplicit")
    } finally {
      // Release the session and its executors even if the job fails midway.
      spark.stop()
    }
  }
}