package com.spark.cust.movie


import java.util.Properties

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.sql.types.{DoubleType, IntegerType, StringType, StructField, StructType}

/**
 * @description: Computes the average rating per movie from ratings.dat,
 *               joins in the movie name from movies.dat, writes the
 *               result to MySQL via JDBC, and prints movies rated >= 4.5.
 * @time: 2020/12/4 0:47
 * @author: lhy
 */
object Code02_MovieScoreAvg {
    /**
     * Entry point. Reads MovieLens-style `::`-delimited files, computes each
     * movie's average rating, persists <movieId, movieName, ave_rating> to
     * MySQL, then shows movies whose average rating is at least 4.5.
     */
    def main(args: Array[String]): Unit = {
        val spark: SparkSession = SparkSession.builder().appName("MovieScoreAvg").master("local").getOrCreate()
        import spark.implicits._

        // Output schema: <movieId, movieName, ave_rating>
        val fields: Array[StructField] = Array(
            StructField("movieId", IntegerType, nullable = true),
            StructField("movieName", StringType, nullable = true),
            StructField("ave_rating", DoubleType, nullable = true))
        val schema: StructType = StructType(fields)

        val ratingsFile: RDD[String] = spark.sparkContext.textFile("input/movie/ratings.dat")
        val moviesFile: RDD[String] = spark.sparkContext.textFile("input/movie/movies.dat")

        // ratings.dat: userId::movieId::rating::timestamp -> (movieId, rating)
        val rating: RDD[(Int, Double)] = ratingsFile.map { line =>
            val fields: Array[String] = line.split("::")
            (fields(1).toInt, fields(2).toDouble)
        }

        // (movieId, averageRating). mapValues + reduceByKey over (sum, count)
        // combines on the map side, unlike groupByKey which shuffles every
        // individual rating across the cluster before averaging.
        val movieScores: RDD[(Int, Double)] = rating
            .mapValues(r => (r, 1))
            .reduceByKey { case ((s1, c1), (s2, c2)) => (s1 + s2, c1 + c2) }
            .mapValues { case (sum, count) => sum / count }

        // movies.dat: movieId::movieName::genres -> (movieId, movieName)
        val movies: RDD[(Int, String)] = moviesFile.map { line =>
            val fields: Array[String] = line.split("::")
            (fields(0).toInt, fields(1))
        }

        // Inner join on movieId directly — both RDDs are already keyed by it,
        // so no extra keyBy wrapping is needed. Sort by average, descending.
        val movieRow: RDD[Row] = movies.join(movieScores)
            .sortBy({ case (_, (_, avg)) => avg }, ascending = false)
            .map { case (id, (name, avg)) => Row(id, name, avg) }

        val ratingsDF: DataFrame = spark.createDataFrame(movieRow, schema)

        // NOTE(review): JDBC credentials are hard-coded; move them to external
        // configuration (e.g. args or a properties file) before production use.
        val prop = new Properties()
        prop.put("user", "root")
        prop.put("password", "bigdata")
        prop.put("driver", "com.mysql.jdbc.Driver")
        ratingsDF.write.mode("append").jdbc("jdbc:mysql://192.168.21.104:3306/spark", "spark.movie_ratings", prop)

        // Re-expose the DataFrame as SQL and print the highly-rated movies.
        ratingsDF.createOrReplaceTempView("movie_ratings")
        val results: DataFrame = spark.sql("select movieName,ave_rating from movie_ratings where ave_rating >= 4.5")
        results.map(attributes => attributes(0) + "  " + attributes(1)).show()

        // Release the SparkSession/SparkContext — the original leaked it.
        spark.stop()
    }
}
