package org.example

import org.apache.spark.sql.SparkSession

object b {
  /**
   * Entry point: loads MovieLens-style `.dat` files (users, occupations,
   * ratings, movies; `::`-separated fields) from `src/main/resources/` and
   * prints a series of simple statistics:
   * record counts, movies watched by user 20, top-10 movies by average
   * rating (simple and detailed), top-10 most-watched movies, per-genre
   * movie counts, and counts of comedies and year-2000 movies.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .master("local[*]")
      .appName("sparkBase")
      .getOrCreate()
    val sc = spark.sparkContext

    val filepath = "src/main/resources/"
    val usersRDD = sc.textFile(filepath + "users.dat")
    val occupationsRDD = sc.textFile(filepath + "occupations.dat")
    val ratingsRDD = sc.textFile(filepath + "ratings.dat")
    val moviesRDD = sc.textFile(filepath + "movies.dat")

    // 2. Cache the raw RDDs — each is reused by several actions below.
    occupationsRDD.cache()
    usersRDD.cache()
    ratingsRDD.cache()
    moviesRDD.cache()

    // 3. Print record counts.
    println("职业数:" + occupationsRDD.count())
    println("电影数:" + moviesRDD.count())
    println("用户数:" + usersRDD.count())
    println("评分条数:" + ratingsRDD.count())

    // 4. Requirement 1: number of movies watched by a given user (ID "20")
    //    plus each movie's title and genres.
    val userMovies = ratingsRDD.map(x => x.split("::")).filter(x => x(0) == "20").map(x => x(1)).distinct()
    println("用户20观看过的电影数: " + userMovies.count())

    // Small lookup table (movieId -> (title, genres)) collected to the driver.
    val moviesInfo = moviesRDD.map(x => x.split("::")).map(x => (x(0), (x(1), x(2)))).collectAsMap()
    userMovies.collect().foreach { movieId =>
      // Skip ratings that reference a movie missing from movies.dat.
      moviesInfo.get(movieId).foreach { case (title, genres) =>
        println(s"($movieId,$title,$genres)")
      }
    }

    // 5. Requirement 2: top-10 movies by average rating (simple version).
    val rating = ratingsRDD.map(x => x.split("::")).map {
      x => (x(0), x(1), x(2)) // (UserID, MovieID, Rating)
    }.cache()

    // Shared aggregation: movieId -> (sum of ratings, number of ratings).
    // Computed once and cached — both the simple and the detailed top-10
    // views below are derived from it (previously this reduceByKey ran twice).
    val ratingSums = rating.map(x => (x._2, (x._3.toDouble, 1)))
      .reduceByKey((x, y) => (x._1 + y._1, x._2 + y._2))
      .cache()

    println("平均得分最高的前10名电影名称简单版")
    val avgRatings = ratingSums
      .map(x => (x._2._1 / x._2._2, x._1))
      .sortByKey(false)
      .take(10)
    avgRatings.foreach(println)

    println("按平均分取前10部电影输出详情:(平均分,(movieId,Title,Genres,总分,总次数))")
    val moviesInfoRDD = moviesRDD.map(x => x.split("::")).map(x => (x(0), (x(1), x(2))))
    // movieId -> (average, total score, rating count)
    val ratingsInfo = ratingSums
      .map(x => (x._1, (x._2._1 / x._2._2, x._2._1, x._2._2)))

    val joinedInfo = moviesInfoRDD.join(ratingsInfo)
      .map(info => (info._2._2._1, (info._1, info._2._1._1, info._2._1._2, info._2._2._2, info._2._2._3)))
      .sortByKey(false)
      .take(10)
    joinedInfo.foreach(println)

    // Top-10 movies by number of viewers (ratings per movie).
    println("观影人数最多的前10部电影")
    val watchViewsInfo = rating.map(x => (x._2, 1))
      .reduceByKey((x, y) => x + y)
      .map(x => (x._2, x._1))
      .sortByKey(false)
      .take(10)
    watchViewsInfo.foreach(println)

    println("===================>")
    // Same ranking printed again — reuse the already-collected array instead
    // of recomputing the identical reduceByKey/sort/take pipeline.
    watchViewsInfo.foreach(println)

    println("详情的输出(观影人数，电影编号)")
    val detailedInfo = moviesInfoRDD.join(ratingsInfo)
      .map(x => (x._2._2._3, (x._2._1._1, x._2._1._2, x._2._2._1, x._2._2._2)))
      .sortByKey(false)
      .take(10)
    detailedInfo.foreach(println)

    // Genre statistics: explode the pipe-separated genre list and count
    // how many movies fall into each genre.
    val movieStyle = moviesRDD.map(_.split("::"))
      .map(x => (x(0), x(2)))
      .flatMapValues(x => x.split("\\|"))
      .map(x => (x._2, 1))
      .reduceByKey((x, y) => x + y)
    // collect() first so the println runs on the driver — a bare
    // rdd.foreach(println) would print on the executors and produce no
    // driver output when running on a real cluster.
    movieStyle.collect().foreach(println)

    val comedyMovies = moviesRDD.map(x => x.split("::"))
      .filter(x => x(2).contains("Comedy"))
      .count()
    println(s"喜剧片的电影数量: $comedyMovies")

    // Titles embed the release year, e.g. "Toy Story (2000)".
    val moviesIn2000 = moviesRDD.map(x => x.split("::"))
      .filter(x => x(1).contains("(2000)"))
      .count()
    println(s"2000年度生产的电影数量: $moviesIn2000")

    // Release resources — the session was never stopped before.
    spark.stop()
  }
}


