package com.joyxj.movielens

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SparkSession

/**
  * 评分操作类
  *
  * @author xiaoj
  * @version 2018-03-13
  */
object RatingsOps {

  // Default locations of the MovieLens 20M data files (overridable via
  // the topNRatings path parameters).
  private val DefaultRatingsPath = "E:\\BaiduNetdiskDownload\\ml-20m\\ratings.csv"
  private val DefaultMoviesPath  = "E:\\BaiduNetdiskDownload\\ml-20m\\movies.csv"

  def main(args: Array[String]): Unit = {
    // Fix: app name said "MoviesOps" but this object is RatingsOps (copy-paste slip).
    val conf = new SparkConf().setMaster("local").setAppName("RatingsOps")
    val sparkSession = SparkSession.builder().config(conf).appName("RatingsOps").getOrCreate()
    val sc = sparkSession.sparkContext
    topNRatings(sc, 100)
  }

  /**
    * Prints the titles and average ratings of the top-n highest-rated movies.
    *
    * Steps:
    *  1. read ratings.csv (userId,movieId,rating,timestamp)
    *  2. compute sum of ratings and rating count per movie in a single pass
    *  3. derive the average rating per movie
    *  4. sort descending by average and take the top n
    *  5. join with movies.csv to attach titles, then print
    *
    * @param sc          active SparkContext
    * @param n           number of top movies to print
    * @param ratingsPath location of ratings.csv (defaults to the ml-20m path)
    * @param moviesPath  location of movies.csv (defaults to the ml-20m path)
    */
  def topNRatings(sc: SparkContext,
                  n: Int,
                  ratingsPath: String = DefaultRatingsPath,
                  moviesPath: String = DefaultMoviesPath): Unit = {
    // 1. Load ratings, drop the header row, and split each line exactly once
    //    (the original split every line twice per use).
    val ratings = sc.textFile(ratingsPath)
      .filter(line => !line.contains("userId"))
      .map { line =>
        val fields = line.split(",")
        (fields(1), fields(2).toDouble) // (movieId, rating)
      }

    // 2. One aggregation pass yields (sum, count) per movie — the original
    //    parsed and shuffled the ratings RDD twice, and its count pass did not
    //    filter the header (it only worked because the inner join dropped the
    //    stray "movieId" key).
    val sumAndCount = ratings.aggregateByKey((0.0, 0))(
      (acc, rating) => (acc._1 + rating, acc._2 + 1),
      (a, b) => (a._1 + b._1, a._2 + b._2)
    )

    // 3. Average rating per movie.
    // NOTE(review): movies with very few ratings can dominate this ranking;
    // consider requiring a minimum rating count if that matters.
    val avgRatings = sumAndCount.mapValues { case (sum, count) => sum / count }

    // 4. Sort descending by average rating and keep the top n as (movieId, avg).
    val topMovies = avgRatings
      .map { case (movieId, avg) => (avg, movieId) }
      .sortByKey(ascending = false)
      .take(n)
      .map { case (avg, movieId) => (movieId, avg) }

    // 5. Load movies.csv (movieId,title,genres), dropping its header row too.
    // NOTE(review): split(",") truncates quoted titles that contain commas
    // (e.g. "American President, The (1995)"); a real CSV parser is needed
    // for fully correct titles.
    val movies = sc.textFile(moviesPath)
      .filter(line => !line.startsWith("movieId"))
      .map { line =>
        val fields = line.split(",")
        (fields(0), fields(1)) // (movieId, title)
      }

    // Attach titles to the top movies and print (title, averageRating).
    // NOTE(review): foreach(println) prints on executors; fine for local mode.
    movies.join(sc.parallelize(topMovies))
      .map { case (_, (title, avg)) => (title, avg) }
      .foreach(println(_))
  }

}
