package rdd

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object RDD_DemoExample04 {

  /** Splits a CSV line on commas that are NOT inside double quotes.
   *  MovieLens titles containing commas are quoted (e.g. `"American President, The (1995)"`),
   *  so a plain `split(",")` would silently truncate those titles.
   *  The lookahead matches a comma only when an even number of quotes follows it.
   */
  private def splitCsv(line: String): Array[String] =
    line.split(""",(?=(?:[^"]*"[^"]*")*[^"]*$)""", -1)

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setMaster("local")
    conf.setAppName("RDD_DemoExample04")

    val sc: SparkContext = new SparkContext(conf)
    // Load the raw data sets as RDDs of CSV lines.
    val moviesRDD: RDD[String] = sc.textFile("data/movies.csv")
    val ratingsRDD: RDD[String] = sc.textFile("data/ratings.csv")

    // Quick exploration of the inputs.
    println("电影数据集中记录数量：" + moviesRDD.count())
    println("电影评分数据集中记录数量：" + ratingsRDD.count())

    // From the ratings data set, compute the per-movie average rating as
    // (movieId, avgRating), keeping only movies rated above 4.0.
    // Sum/count are accumulated with reduceByKey instead of groupByKey so that
    // partial aggregates are combined map-side and only one (sum, count) pair
    // per movie crosses the shuffle, rather than every individual rating.
    val ratingsAvgRDD: RDD[(String, Double)] = ratingsRDD
      .filter(line => !line.startsWith("userId,movieId")) // drop the header row
      .map(line => {
        val fields = splitCsv(line)
        (fields(1).trim, (fields(2).trim.toDouble, 1L)) // (movieId, (rating, count))
      })
      .reduceByKey((a, b) => (a._1 + b._1, a._2 + b._2))
      .mapValues { case (sum, count) => sum / count }
      .filter(tuple => tuple._2 > 4.0)

    // From the movies data set, extract (movieId, title).
    // Surrounding quotes (present when the title itself contains a comma)
    // are stripped so all titles come out in the same form.
    val moviesAvgRDD: RDD[(String, String)] = moviesRDD
      .filter(line => !line.startsWith("movieId,title")) // drop the header row
      .map(line => {
        val fields = splitCsv(line)
        (fields(0).trim, fields(1).trim.stripPrefix("\"").stripSuffix("\""))
      })

    // Join the two data sets into (movieId, title, avgScore) tuples,
    // sorted by average score in descending order. The score is lifted
    // into the key position purely so sortByKey can order by it.
    val resultRDD: RDD[(String, String, Double)] = moviesAvgRDD
      .join(ratingsAvgRDD)
      .map(t => (t._2._2, (t._1, t._2._1, t._2._2)))
      .sortByKey(false)
      .map(_._2)

    resultRDD.collect().foreach(println)
    // NOTE: saveAsTextFile throws if the output directory already exists;
    // delete "moviesOutput" between runs.
    resultRDD.saveAsTextFile("moviesOutput")

    sc.stop()
  }
}
