package Als

import org.apache.spark.ml.evaluation.RegressionEvaluator
import org.apache.spark.ml.recommendation.ALS
import org.apache.spark.sql.SparkSession

/**
 * End-to-end ALS collaborative-filtering example on the MovieLens sample data:
 * parses "userId::movieId::rating::timestamp" lines, trains an ALS model,
 * reports RMSE on a held-out split, and writes top-10 recommendations
 * per user and per movie as JSON.
 *
 * Usage: ALSExample [inputPath [outputDir]]
 * Defaults preserve the original hard-coded Windows paths.
 */
object ALSExample {

    /** One MovieLens rating record. */
    final case class Rating(userId: Int, movieId: Int, rating: Float, timestamp: Long)

    /**
     * Parses a single "userId::movieId::rating::timestamp" line into a [[Rating]].
     *
     * @param st raw input line
     * @throws IllegalArgumentException if the line does not contain exactly 4 fields
     * @throws NumberFormatException if a field is not numeric
     */
    def ratingParse(st: String): Rating = {
      val fields = st.split("::")
      // require (not assert): asserts can be disabled at runtime, input validation must not be
      require(fields.length == 4, s"expected 4 '::'-separated fields, got ${fields.length} in: $st")
      Rating(fields(0).toInt, fields(1).toInt, fields(2).toFloat, fields(3).toLong)
    }

    def main(args: Array[String]): Unit = {
      // Input file and output directory are overridable from the command line;
      // defaults keep the original hard-coded paths for backward compatibility.
      val inputPath = args.headOption.getOrElse("D:\\asa\\dianshang\\sample_movielens_ratings.txt")
      val outputDir = if (args.length > 1) args(1) else "D:\\asa\\dianshang"

      val spark = SparkSession
        .builder()
        .appName(s"${this.getClass.getName}")
        .master("local[*]")
        .getOrCreate()
      import spark.implicits._

      val ratings = spark.read.textFile(inputPath)
        .map(ratingParse)
        .toDF()
      // Random split: 80% training, 20% test.
      val Array(training, test) = ratings.randomSplit(Array(0.8, 0.2))

      val als = new ALS()
        .setMaxIter(5)          // number of ALS iterations
        .setRegParam(0.1)       // regularization parameter
        .setUserCol("userId")
        .setItemCol("movieId")
        .setRatingCol("rating")

      // Train the model on the training split.
      val model = als.fit(training)
      // Drop rows for users/items unseen during training; otherwise their
      // predictions are NaN and the RMSE below would also be NaN.
      model.setColdStartStrategy("drop")

      // Score the held-out test split.
      val predictions = model.transform(test)
      // RMSE between actual ratings and predictions.
      val evaluator = new RegressionEvaluator()
        .setMetricName("rmse")
        .setLabelCol("rating")
        .setPredictionCol("prediction")
      val rmse = evaluator.evaluate(predictions)
      println(s"Root-mean-square error =$rmse")
      // Sample run: Root-mean-square error =1.0173707077705045

      // Top-10 movie recommendations for every user.
      // Output schema: userId, recommendations = array of (movieId, predictedRating),
      // e.g. |    28|[[92,4.350857], ...|
      val userRecs = model.recommendForAllUsers(10)
      // overwrite: default ErrorIfExists save mode would crash on a re-run
      userRecs.repartition(2).write.mode("overwrite").json(s"$outputDir\\user")
      userRecs.show()

      println("______________________________")
      // Top-10 users most likely to like each movie.
      // Output schema: movieId, recommendations = array of (userId, predictedRating),
      // e.g. |     31|[[7,2.403637], ...|
      val movieRecs = model.recommendForAllItems(10)
      movieRecs.repartition(2).write.mode("overwrite").json(s"$outputDir\\movie")
      movieRecs.show()

      spark.stop()
    }

}
