package com.jiang.movie.metrics

import com.jiang.movie.demos.{Movies, Ratings, tenGreatestMoviesByAverageRating}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

class BestFilmsByOverallRating extends Serializable {

  /**
   * Finds the ten best movies by average rating among movies that have received
   * at least 5000 ratings, computing the answer twice — once with Spark SQL and
   * once with the DataFrame DSL — and printing both results via `show`.
   *
   * @param moviesDF  movies data; must expose `movieId` and `title` columns
   *                  (assumed to match the `Movies` case class — confirm schema)
   * @param ratingsDF ratings data; must expose `movieId` and `rating` columns
   *                  (assumed to match the `Ratings` case class — confirm schema)
   * @param spark     active SparkSession, used to register temp views and run SQL
   */
  def run(moviesDF: DataFrame, ratingsDF: DataFrame, spark: SparkSession): Unit = {
    import spark.implicits._
    import org.apache.spark.sql.functions._

    // ========== SQL query ==========
    // Register both DataFrames as temporary views so they can be queried by name.
    moviesDF.createOrReplaceTempView("movies")
    ratingsDF.createOrReplaceTempView("ratings")
    // CTE 1 aggregates ratings per movie and keeps movies with >= 5000 ratings;
    // CTE 2 takes the top ten by average rating; the final SELECT joins titles in
    // and re-sorts, because the join does not guarantee the CTE's row order.
    val topTenSql =
      """
        |WITH ratings_filter_cnt AS (
        |SELECT
        |	    movieId,
        |	    count( * ) AS rating_cnt,
        |	    avg( rating ) AS avg_rating
        |FROM
        |	    ratings
        |GROUP BY
        |	    movieId
        |HAVING
        |	    count( * ) >= 5000
        |),
        |ratings_filter_score AS (
        |SELECT
        |     movieId, -- 电影id
        |     avg_rating -- 电影平均评分
        |FROM ratings_filter_cnt
        |ORDER BY avg_rating DESC -- 平均评分降序排序
        |LIMIT 10 -- 平均分较高的前十部电影
        |)
        |SELECT
        |	   m.movieId,
        |	   m.title,
        |	   r.avg_rating AS avgRating
        |FROM
        |	  ratings_filter_score r
        |JOIN movies m ON m.movieId = r.movieId
        |order by r.avg_rating DESC limit 10
      """.stripMargin

    val resultDS = spark.sql(topTenSql).as[tenGreatestMoviesByAverageRating]
    resultDS.show(false)

    // ========== DSL query ==========
    // Same requirement expressed with the typed DataFrame API:
    // movies with >= 5000 ratings, top ten by average rating, with titles.
    val movieDS: Dataset[Movies] = moviesDF.as[Movies]
    val ratingDS: Dataset[Ratings] = ratingsDF.as[Ratings]

    // Aggregate per movie, keep only sufficiently-rated movies, take the top ten.
    // ($"col" interpolator replaces the deprecated Symbol syntax 'col.)
    val topTenByAvgRating: Dataset[Row] = ratingDS
      .groupBy($"movieId")
      .agg(
        count("*").as("rating_cnt"),
        avg($"rating").as("avgRating")
      )
      .filter($"rating_cnt" >= 5000)
      .orderBy($"avgRating".desc)
      .limit(10)

    // Join back to movies to attach titles; re-sort because the join may not
    // preserve the ordering of the limited aggregate.
    val result: DataFrame = movieDS
      .join(topTenByAvgRating, "movieId")
      .select("movieId", "title", "avgRating")
      .orderBy($"avgRating".desc)

    result.show(false)

    // NOTE: the SparkSession is owned by the caller — do not stop it here.
  }

}
