package com.spark.mooc.ch5_rdd.part04_cases

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @description: Joins MovieLens-style movies.dat with ratings.dat to produce
 *               (MovieID, Title, AvgRating) tuples sorted by average rating.
 * @time: 2020/12/16 14:42
 * @author: lhy
 */
object JoinDemo {
    /**
     * Joins movie metadata with per-movie average ratings and prints
     * (MovieID, Title, AvgRating) tuples sorted by rating, descending.
     *
     * Input files use the MovieLens "::"-delimited format:
     *   movies.dat  -> MovieID::Title::Genres
     *   ratings.dat -> UserID::MovieID::Rating::Timestamp
     *
     * @param args optional overrides: args(0) = movies.dat path,
     *             args(1) = ratings.dat path (defaults preserve the original
     *             hard-coded locations)
     */
    def main(args: Array[String]): Unit = {
        val moviesPath = if (args.length > 0) args(0) else "input/rdd/join_moveData/movies.dat"
        val ratingsPath = if (args.length > 1) args(1) else "input/rdd/join_moveData/ratings.dat"

        val conf = new SparkConf().setAppName("xxoo").setMaster("local")
        val sc = new SparkContext(conf)
        try {
            val movies = sc.textFile(moviesPath)
            val ratings = sc.textFile(ratingsPath)

            // <MovieID, Title>. A pair RDD already joins on its first element,
            // so the original keyBy(_._1) was redundant and duplicated the key
            // inside the join payload.
            val movie: RDD[(Int, String)] = movies
              .map(_.split("::"))
              .map(f => (f(0).toInt, f(1)))

            // <MovieID, AvgRating>. Accumulate (sum, count) with reduceByKey
            // instead of groupByKey: the average is combined map-side, so only
            // one pair per movie per partition is shuffled rather than every
            // individual rating.
            val rating: RDD[(Int, Double)] = ratings
              .map(_.split("::"))
              .map(f => (f(1).toInt, (f(2).toDouble, 1L)))
              .reduceByKey((a, b) => (a._1 + b._1, a._2 + b._2))
              .mapValues { case (sum, count) => sum / count }

            // Join to <MovieID, (Title, AvgRating)>, sort by rating descending,
            // then flatten to <MovieID, Title, AvgRating>.
            val results = movie.join(rating)
              // .filter(f => f._2._2 > 4.0)  // optional: keep only movies rated above 4.0
              .sortBy(f => f._2._2, ascending = false)
              .map(f => (f._1, f._2._1, f._2._2))
            results.foreach(println)
        } finally {
            // Always release the SparkContext, even if the job fails.
            sc.stop()
        }
    }
}
