package com.spark.mooc.ch5_rdd.part04_cases

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @description: 任务描述：在推荐领域有一个著名的开放测试集，下载链接是：http://grouplens.org/datasets/movielens/，
 *              该测试集包含三个文件，分别是ratings.dat、users.dat、movies.dat，具体介绍可阅读：README.txt。
 *              请编程实现：通过连接ratings.dat和movies.dat两个文件得到平均得分超过4.0的电影列表，采用的数据集是：ml-1m
 * @time: 2020/11/27 17:38
 * @author: lhy
 */
object SparkJoin {
    /**
     * Joins MovieLens ratings.dat with movies.dat and prints every movie whose
     * average rating exceeds 4.0, highest-rated first, as (movieId, avgRating, title).
     *
     * ratings.dat line format: userId::movieId::rating::timestamp
     * movies.dat  line format: movieId::title::genres
     *
     * @param args unused in local mode (a 3-arg <rating> <movie> <output> CLI is
     *             sketched in the commented-out block below)
     */
    def main(args: Array[String]): Unit = {
//        if (args.length != 3){
//            println("usage is WordCount <rating> <movie> <output>")
//            return
//        }
        val conf: SparkConf = new SparkConf().setAppName("SparkJoin").setMaster("local")
        val sc = new SparkContext(conf)
        try {
            // Read ratings and extract (movieId, rating).
            val ratingsFile: RDD[String] = sc.textFile("input/rdd/join_moveData/ratings.dat")
            val ratings: RDD[(Int, Double)] = ratingsFile.map { line =>
                val fields = line.split("::")
                (fields(1).toInt, fields(2).toDouble)
            }

            // Compute (movieId, averageRating) with reduceByKey over (sum, count)
            // pairs. Unlike groupByKey, this combines partial sums map-side, so only
            // one (sum, count) pair per movie per partition is shuffled instead of
            // every individual rating.
            val movieScores: RDD[(Int, Double)] = ratings
                .mapValues(r => (r, 1L))
                .reduceByKey((a, b) => (a._1 + b._1, a._2 + b._2))
                .mapValues { case (sum, count) => sum / count }

            // Read movies and extract (movieId, movieName).
            val movies: RDD[(Int, String)] = sc.textFile("input/rdd/join_moveData/movies.dat").map { line =>
                val fields = line.split("::")
                (fields(0).toInt, fields(1))
            }

            // Both RDDs are already keyed by movieId, so join them directly:
            //   movieScores: RDD[(movieId, avgRating)]
            //   movies:      RDD[(movieId, movieName)]
            //   joined:      RDD[(movieId, (avgRating, movieName))]
            // (The original keyBy(_._1) step was redundant — it re-keyed already
            // keyed pairs, nesting the key inside the value a second time.)
            val result: RDD[(Int, Double, String)] = movieScores
                .join(movies)
                .filter { case (_, (avg, _)) => avg > 4.0 }
                .sortBy({ case (_, (avg, _)) => avg }, ascending = false)
                .map { case (id, (avg, name)) => (id, avg, name) }

            // Trigger the lazy pipeline with an action. (Previously every action was
            // commented out, so the job built the DAG but never executed anything.)
            result.collect().foreach(println)
//            result.saveAsTextFile(args(2))
        } finally {
            // Always release the SparkContext, even if the job throws.
            sc.stop()
        }
    }
}
