package org.example

import org.apache.spark.sql.SparkSession

/**
 * Exploratory analysis of a MovieLens-style dataset (users / movies / ratings /
 * occupations) using Spark RDDs on a local master.
 *
 * Expected file formats (`::`-delimited):
 *   - ratings.dat: userId::movieId::rating::timestamp
 *   - movies.dat:  movieId::title::genres  (genres are `|`-separated)
 */
object SparkYun1_148 {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .master("local[*]")
      .appName("sparkBase")
      .getOrCreate()
    val sc = spark.sparkContext

    // Print an overview of the dataset (row counts per file).
    val filePath = "src/main/resources/"
    val usersRDD = sc.textFile(filePath + "users.dat")
    val moviesRDD = sc.textFile(filePath + "movies.dat")
    val ratingsRDD = sc.textFile(filePath + "ratings.dat")
    val occupationsRDD = sc.textFile(filePath + "occupations.dat")
    println("用户总数:"+ usersRDD.count())
    println("电影总数:" + moviesRDD.count())
    println("评分数:" + ratingsRDD.count())
    println("职业数:" + occupationsRDD.count())

    // Ratings keyed by movieId with userId as the value, restricted to user "8".
    val userMovieRating = ratingsRDD.map(_.split("::"))
      .map(tp => (tp(1), tp(0))) // (movieId, userId)
      .filter(_._2.equals("8"))
    println("用户id为8的评分电影总数是:" + userMovieRating.count())

    // Movies keyed by movieId, value = (title, genres), for joining with ratings.
    val movieInfo = moviesRDD.map(_.split("::"))
      .map(sp => (sp(0), (sp(1), sp(2))))
    // BUG FIX: the tuple previously repeated the title (xp._2._2._1) in the last
    // position; it now emits the genres field (xp._2._2._2) as the join intended.
    // Result shape: (movieId, userId, title, genres)
    val detail = userMovieRating.join(movieInfo).map(xp => {
      (xp._1, xp._2._1, xp._2._2._1, xp._2._2._2)
    })
    detail.take(5).foreach(println)

    // Analysis 2: top-10 movies by average rating and by audience size.

    // (userId, movieId, rating) — cached because several actions reuse it below.
    val rating = ratingsRDD.map(x => x.split("::")).map {
      x => {
        (x(0), x(1), x(2))
      }
    }.cache()
    println("平均得分最高的前 10 名的电影名称简单版")
    // Average rating per movie: sum ratings and counts, then divide.
    rating.map(x => (x._2, (x._3.toDouble, 1)))
      .reduceByKey((x, y) => {
        (x._1 + y._1, x._2 + y._2)
      })
      .map(x => (x._2._1 / x._2._2, x._1)) // (avgScore, movieId), sorted desc
      .sortByKey(false)
      .take(10)
      .foreach(println)
    println("按平均分取前 10 部电影输出详情:(平均分,(movieId,Title,Genres,总分,总次 数))")
    val moviesInfo = moviesRDD.map(x => x.split("::"))
      .map(x => {
        (x(0), x(1), x(2))
      })
    // (movieId, (avgScore, totalScore, ratingCount))
    val ratingsInfo = rating.map(x => (x._2, (x._3.toDouble, 1)))
      .reduceByKey((x, y) => {
        (x._1 + y._1, x._2 + y._2)
      })
      .map(x => (x._1, (x._2._1 / x._2._2, x._2._1, x._2._2)))
    // NOTE(review): detailed join output left disabled, as in the original.
//    moviesInfo.join(ratingsInfo)
//      .map(info => {
//        (info._2._2._1, (info._1, info._2._1._1, info._2._1._2, info._2._2._2, info._2._2._3))
//      }).sortByKey(false)
//      .take(5)
//      .foreach(println)
    println("观影人数最多的前 10 部电影")
    // Count distinct rating rows per movie as a proxy for audience size.
    val watchViewsInfo = rating.map(x => {
      (x._2, 1)
    }).reduceByKey((x, y) => x + y)
      .map(x => (x._2, x._1)) // (viewCount, movieId), sorted desc
      .sortByKey(false)
      .take(10)
    watchViewsInfo.foreach(println(_))
    println("===================>")
    // Same top-10-by-viewers computation printed a second time (kept to
    // preserve the original program's output).
    rating.map(x => (x._2, 1))
      .reduceByKey((x, y) => {
        (x + y)
      })
      .map(x => (x._2, x._1))
      .sortByKey(false)
      .take(10)
      .foreach(println)
    println("详情的输出(  观影人数，电影编号)")

    // Count movies per genre: explode the `|`-separated genres column.
    val movieStyle = moviesRDD.map(_.split("::"))
      .map(x => (x(0), x(2)))
      .flatMapValues(x => x.split("\\|"))
      .map(x => (x._2, 1))
      .reduceByKey((x, y) => x + y)

    movieStyle.foreach(println)

    // Number of comedies.
    val comedyCount = moviesRDD.map(_.split("::"))
      .map(x => (x(0), x(2)))
      .flatMapValues(_.split("\\|"))
      .filter { case (_, genre) => genre == "Comedy" }
      .map(_ => 1)
      .reduce(_ + _)

    println(s"喜剧电影总数: $comedyCount")

    // Movies from the year 2000.
    // NOTE(review): substring match on the title — any title containing the
    // text "2000" also matches, not only "(2000)" release years; confirm intent.
    val comedyCount1 = moviesRDD.map(x => x.split("::"))
      .filter(x => x(1).contains("2000"))
      .count()

    println(s"2000年生产电影总数量: $comedyCount1")

    sc.stop()
  }

}
