package com.jiang.movie.metrics

import com.jiang.movie.demos.{Movies, Ratings, topGenresByAverageRating}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

class GenresByAverageRating {

  /**
   * Computes the average rating for each movie genre, ordered from highest
   * to lowest average rating, and prints the result.
   *
   * Expects `moviesDF` to provide at least `movieId`, `title` and a
   * pipe-separated `genres` column, and `ratingsDF` to provide `movieId`
   * and `rating` (assumed from the SQL below — confirm against the loaders).
   *
   * Side effects: registers the temp views "movies" and "ratings" on the
   * given session (replacing any existing views with those names) and
   * prints the full result via `show`.
   *
   * @param moviesDF  movies data (movieId, title, genres)
   * @param ratingsDF ratings data (movieId, rating)
   * @param spark     active SparkSession used to run the SQL
   */
  def run(moviesDF: DataFrame, ratingsDF: DataFrame, spark: SparkSession): Unit = {
    import spark.implicits._

    // Register both DataFrames as temp views so they can be queried with SQL.
    moviesDF.createOrReplaceTempView("movies")
    ratingsDF.createOrReplaceTempView("ratings")

    // Explode the pipe-separated `genres` column into one (movie, category)
    // row per genre, join against the ratings, then average per genre.
    // Note: the pipe is escaped ("\\|") because SQL split() takes a regex.
    val avgRatingByGenreSql =
      """
        |WITH explode_movies AS (
        |SELECT
        | movieId,
        | title,
        | category
        |FROM
        | movies lateral VIEW explode ( split ( genres, "\\|" ) ) temp AS category
        |)
        |SELECT
        | m.category AS genres,
        | avg( r.rating ) AS avgRating
        |FROM
        | explode_movies m
        | JOIN ratings r ON m.movieId = r.movieId
        |GROUP BY
        | m.category
        | order by avgRating desc
        | """.stripMargin

    // Map the untyped result onto the typed case class (needs spark.implicits._
    // in scope for the Encoder derivation).
    val resultDS: Dataset[topGenresByAverageRating] =
      spark.sql(avgRatingByGenreSql).as[topGenresByAverageRating]

    // Print every row without truncating long genre names.
    resultDS.show(false)
  }

}
