import org.apache.spark.sql.SparkSession
/**
 * MovieLens analysis on Spark core RDDs. Loads the "::"-delimited
 * movies/users/occupations/ratings files from `src/main/resources`, then prints:
 * basic counts, the movies watched by user 1, the top-10 movies by average
 * rating (simple and detailed), the top-10 by audience size, per-genre movie
 * counts, and a movies-per-release-year histogram.
 */
object data6_core {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .appName("data6_core") // fix: SparkContext requires spark.app.name; without it startup fails
      .master("local[*]")
      .getOrCreate()
    val sc = spark.sparkContext
    val filePath = "src/main/resources/"
    // Raw MovieLens files; fields are separated by "::".
    val movies = sc.textFile(filePath + "movies.dat") // MovieID::Title::Genres
    val occupations = sc.textFile(filePath + "occupations.dat")
    val ratings = sc.textFile(filePath + "ratings.dat") // UserID::MovieID::Rating::Timestamp
    val users = sc.textFile(filePath + "users.dat")
    println("电影数:" + movies.count())
    println("用户数：" + users.count())
    println("职业数：" + occupations.count())
    println("评分数：" + ratings.count())

    // (movieId, userId) pairs for user 1 only. Filtering before the swap avoids
    // building tuples for rows that are immediately discarded.
    val userWatched = ratings.map(_.split("::"))
      .filter(_(0) == "1") // keep only ratings made by the user whose Id is 1
      .map(fields => (fields(1), fields(0)))
    println("Id为1的用户观看的电影数是：" + userWatched.count())

    // (movieId, (title, genres)) — reused by several joins below.
    val moviesInfo = movies.map(_.split("::"))
      .map(movie => (movie(0), (movie(1), movie(2))))

    // Attach title/genres to each movie user 1 watched and print each row.
    userWatched.join(moviesInfo)
      .map { case (movieId, (userId, (title, genres))) => (movieId, userId, title, genres) }
      .foreach(println)
    println("----------------------------------------------------------------------------------------")

    // (userId, movieId, rating) triples; cached because they feed several jobs below.
    val rating = ratings.map(_.split("::"))
      .map(x => (x(0), x(1), x(2)))
      .cache()

    println("平均得分最高的前 10 名的电影名称简单版")
    rating.map(x => (x._2, (x._3.toDouble, 1)))
      .reduceByKey((a, b) => (a._1 + b._1, a._2 + b._2)) // (score sum, rating count) per movie
      .map { case (movieId, (sum, cnt)) => (sum / cnt, movieId) }
      .sortByKey(false)
      .take(10) // fix: header promises top 10 but the code took only 5
      .foreach(println)

    println("按平均分取前 10 部电影输出详情:(平均分,(movieId,Title,Genres,总分,总次 数))")
    // (movieId, (average, totalScore, ratingCount))
    val ratingsInfo = rating.map(x => (x._2, (x._3.toDouble, 1)))
      .reduceByKey((a, b) => (a._1 + b._1, a._2 + b._2))
      .map { case (movieId, (sum, cnt)) => (movieId, (sum / cnt, sum, cnt)) }
    moviesInfo.join(ratingsInfo)
      .map { case (movieId, ((title, genres), (avg, sum, cnt))) =>
        (avg, (movieId, title, genres, sum, cnt))
      }
      .sortByKey(false)
      .take(10) // fix: header promises top 10 but the code took only 5
      .foreach(println)

    println("观影人数最多的前 10 部电影")
    val watchViewsInfo = rating.map(x => (x._2, 1))
      .reduceByKey(_ + _)
      .map { case (movieId, views) => (views, movieId) }
      .sortByKey(false)
      .take(10) // fix: header promises top 10 but the code took only 5
    watchViewsInfo.foreach(println)
    println("---------------------------------------------------------------------=")

    // Same top-10-by-views computation printed again (kept for output parity
    // with the original program).
    rating.map(x => (x._2, 1))
      .reduceByKey(_ + _)
      .map { case (movieId, views) => (views, movieId) }
      .sortByKey(false)
      .take(10)
      .foreach(println)

    println("详情的输出(  观影人数，电影编号)")
    moviesInfo.join(ratingsInfo)
      .map { case (_, ((title, genres), (avg, sum, cnt))) =>
        // fix: original emitted the average twice (copy-paste of x._2._2._1);
        // the fourth slot should be the total score.
        (cnt, (title, genres, avg, sum))
      }
      .sortByKey(false)
      .take(10)
      .foreach(println)
    println("------------------------------------------------------------------------------------------------")

    // Per-genre movie counts: split each movie's genre list on '|' and count.
    val movieStyle = movies.map(_.split("::"))
      .map(fields => (fields(0), fields(2)))
      .flatMapValues(_.split("\\|"))
      .map(p => (p._2, 1))
      .reduceByKey(_ + _)
    val comedyCount = movieStyle.filter(_._1 == "Comedy").collect()
    println("喜剧片（Comedy）的数量：" + comedyCount.headOption.map(_._2).getOrElse(0))
    movieStyle.take(5).foreach(println)

    // Movies per release year, parsed from titles like "Toy Story (1995)";
    // titles that don't match the pattern are bucketed under year -1.
    val pattern = """(.*) \((\d{4})\)""".r
    val movieInfo = movies.map(_.split("::"))
      .map(_(1))
      .map {
        case pattern(_, year) => (year.toInt, 1)
        case _ => (-1, 1)
      }
      .reduceByKey(_ + _)
      .sortByKey()
    movieInfo.collect().foreach(println)

    spark.stop() // fix: release the SparkContext before exiting
  }
}
