package com.study.spark.ml.movie.basic

import com.study.spark.ml.movie.Util
import org.apache.spark.sql.functions.{avg, count, desc}

/**
  * Exploratory statistics over the movie-rating data set.
  *
  * @author: stephen.shen
  * @create: 2019-04-10 10:31
  */
object RatingData {

  def main(args: Array[String]): Unit = {
    //getMovieRatingAvgCountSorted
    getRatingCount
  }

  /**
    * Average rating and number of ratings per movie, sorted by rating
    * count in descending order. Stops the shared SparkContext before
    * returning, so it can only be called once per JVM run.
    *
    * @return array of (movie_id, average rating, rating count)
    */
  def getMovieRatingAvgCountSorted: Array[(Int,Double,Int)] ={
    val df = Util.getRatingDataDF()
    // Aggregate per movie: mean rating plus how many ratings it received.
    val resultDF = df.select("movie_id","rating")
      .groupBy("movie_id")
      .agg(avg("rating").as("rating_avg"),count("rating").as("rating_count"))
      .orderBy(desc("rating_count"))
    resultDF.show(false)
    // Convert the DataFrame to an RDD and collect into an Array.
    // count(...) produces a BIGINT column, so it must be read as Long
    // (the previous getAs[Int] threw ClassCastException at runtime);
    // narrow with toInt to keep the declared return type.
    val result = resultDF.rdd.map(row => (
      row.getAs[Int]("movie_id"),
      row.getAs[Double]("rating_avg"),
      row.getAs[Long]("rating_count").toInt
    )).collect()
    Util.sc.stop()
    result
  }

  /**
    * Distribution of rating values: how many times each rating level was
    * given, sorted by rating ascending. Stops the shared SparkContext
    * before returning, so it can only be called once per JVM run.
    *
    * @return array of (rating, occurrence count)
    */
  def getRatingCount: Array[(Int,Long)] ={
    val df = Util.getRatingDataDF()
    // Count the number of occurrences of each rating level.
    val resultDF = df.groupBy("rating")
      .count()
      .sort("rating")
    resultDF.show(false)
    // Convert the DataFrame to an RDD and collect into an Array.
    // NOTE(review): assumes the "rating" column is Int in Util's schema
    // (true for MovieLens-style integer ratings) — confirm against Util.
    val result = resultDF.rdd.map(row => (
      row.getAs[Int]("rating"),
      row.getAs[Long]("count")
    )).collect()
    Util.sc.stop()
    result
  }
}
