package com.example.spark.sql

import com.example.util.SparkUtil
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Analyzes a 100k movie-rating dataset with Spark SQL: finds the ten
 * highest-rated movies among those rated more than 200 times, computed
 * twice — once via a SQL query and once via the DataFrame DSL.
 *
 * @author leali
 * @since 2022/5/17
 */
object MovieDataAnalysis {

  /**
   * Entry point. Reads a tab-separated rating file (userId \t movieId \t score \t timestamp),
   * then computes the top-10 movies by average score among movies with more than
   * 200 ratings — once with Spark SQL and once with the DataFrame DSL.
   *
   * @param args optional; args(0) overrides the input file path
   *             (defaults to "src/data/input/rating_100k.data")
   */
  def main(args: Array[String]): Unit = {
    //config("spark.sql.shuffle.partitions", "4")
    val spark: SparkSession = SparkUtil.initSimpleSparkSession(appName = "MovieDataAnalysis")

    import spark.implicits._

    // Input path is overridable from the command line; defaults to the bundled sample.
    val inputPath: String = args.headOption.getOrElse("src/data/input/rating_100k.data")

    // Keep only (movieId, score) from each tab-separated line.
    val movieDF: DataFrame = spark.read
      .textFile(inputPath)
      .map { (line: String) =>
        val arr: Array[String] = line.split("\t")
        (arr(1), arr(2).toInt)
      }.toDF("movieId", "score")

    movieDF.printSchema()
    movieDF.show()

    // Requirement: top-10 movies by average score among those rated more than 200 times.
    // ====== SQL flavor ======
    // Register a temp view so the DataFrame is addressable from SQL.
    movieDF.createOrReplaceTempView("t_movies")
    val sql: String =
      """
        |select movieId,avg(score) as avgscore,count(*) as counts
        |from t_movies
        |group by movieId
        |having counts > 200
        |order by avgscore desc
        |limit 10
        |""".stripMargin
    spark.sql(sql).show()

    // ====== DSL flavor ======
    import org.apache.spark.sql.functions._
    // $"col" interpolator instead of the deprecated Symbol syntax ('col),
    // which no longer compiles under Scala 2.13.
    movieDF.groupBy($"movieId")
      .agg(
        avg($"score") as "avgscore",
        count("*") as "counts" // count(*) to mirror the SQL query exactly
      ).filter($"counts" > 200)
      .orderBy($"avgscore".desc)
      .limit(10)
      .show()

    spark.stop()
  }
}
