package com.jiang.movie.metrics

import com.jiang.movie.demos.{Movies, Ratings, tenMostRatedFilms}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

class MostRatedFilms {

  /**
   * Requirement 3: find the ten most-rated movies, implemented twice —
   * once with Spark SQL and once with the DataFrame/Dataset DSL — so the
   * two query styles can be compared side by side.
   *
   * Both variants count ratings per `movieId`, keep the ten largest counts,
   * join back to the movies data for titles, and print the result.
   *
   * @param moviesDF  movies data; expected to expose `movieId` and `title` columns
   * @param ratingsDF ratings data; expected to expose a `movieId` column
   * @param spark     active session, used for SQL execution and implicit encoders
   */
  def run(moviesDF: DataFrame, ratingsDF: DataFrame, spark: SparkSession): Unit = {
    import spark.implicits._
    import org.apache.spark.sql.functions._

    // ========== Variant 1: SQL query ==========
    // Register both DataFrames as temp views so they can be queried by name.
    moviesDF.createOrReplaceTempView("movies")
    ratingsDF.createOrReplaceTempView("ratings")

    // CTE 1 counts ratings per movie; CTE 2 keeps the top ten counts;
    // the outer query joins back to movies for the title.
    val topTenSql =
      """
        |WITH rating_group AS (
        |    SELECT
        |       movieId,
        |       count( * ) AS ratingCnt
        |    FROM ratings
        |    GROUP BY movieId
        |),
        |rating_filter AS (
        |    SELECT
        |       movieId,
        |       ratingCnt
        |    FROM rating_group
        |    ORDER BY ratingCnt DESC
        |    limit 10
        |)
        |SELECT
        |    m.movieId,
        |    m.title,
        |    r.ratingCnt
        |FROM
        |    rating_filter r
        |JOIN movies m ON r.movieId = m.movieId
        |order by r.ratingCnt desc
      """.stripMargin

    val resultDS = spark.sql(topTenSql).as[tenMostRatedFilms]
    resultDS.show(false)

    // ========== Variant 2: DSL query ==========
    val movieDS: Dataset[Movies] = moviesDF.as[Movies]
    val ratingDS: Dataset[Ratings] = ratingsDF.as[Ratings]

    // Count ratings per movie and keep the ten highest counts.
    // $"..." replaces the Symbol column syntax ('movieId), which is
    // deprecated in Scala 2.13 and removed in Scala 3.
    val ratingTop10: Dataset[Row] = ratingDS.groupBy($"movieId")
      .agg(count("*") as "countRating")
      .orderBy($"countRating".desc)
      .limit(10)

    // Inner join back to movies to pick up titles. At most 10 rows remain
    // (a join cannot add rows to the limited side), so no further limit
    // is needed afterwards.
    val joinDF: DataFrame = ratingTop10.join(movieDS, "movieId")

    // Re-sort after the join: join output ordering is not guaranteed.
    val result: DataFrame = joinDF.orderBy($"countRating".desc)
      .select("movieId", "title", "countRating")

    result.show(false)
  }

}
