package org.example

import org.apache.spark.sql.SparkSession

import java.util.regex.Pattern

object sparkData3 {

  /**
   * Movie-viewing data analysis over MovieLens-style "::"-delimited .dat files.
   * Reads movies/users/occupations/ratings, prints record counts, counts the
   * movies tagged with the "Fantasy" genre, and derives (releaseYear, 1) pairs
   * from movie titles.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .master("local[*]")
      .getOrCreate()
    val sc = spark.sparkContext

    // NOTE(review): path looks like it should be relative to the project
    // ("src/main/resources/") rather than filesystem-absolute — verify.
    val filePath = "/src/main/resources/"
    // Fixed data-file name typos: "movies.det" -> "movies.dat",
    // "occuptions.dat" -> "occupations.dat" (all siblings use .dat).
    val movies = sc.textFile(filePath + "movies.dat")
    val occupations = sc.textFile(filePath + "occupations.dat")
    val ratings = sc.textFile(filePath + "ratings.dat")
    val users = sc.textFile(filePath + "users.dat")

    // Record counts per dataset (labels are user-facing, kept as-is).
    println("电影数:" + movies.count())
    println("用户数:" + users.count())
    println("职业数:" + occupations.count())
    println("评分数:" + ratings.count())

    // Count movies per genre — field 2 of movies.dat is a '|'-separated
    // genre list — then keep only the "Fantasy" genre.
    val movieStyle = movies.map(_.split("::"))
      .map(fields => (fields(0), fields(2))) // (MovieID, genreList)
      .flatMapValues(_.split("\\|"))         // one (MovieID, genre) pair per genre
      .map { case (_, genre) => (genre, 1) }
      .reduceByKey(_ + _)
      .filter(_._1.equals("Fantasy"))

    // collect() so the println output appears on the driver; foreach on the
    // RDD would print on executors. Result is at most one entry after filter.
    movieStyle.collect().foreach(println)

    // Extract the release year from titles like "Toy Story (1995)".
    // Group 2 captures "(1995)"; the substring strips the parentheses.
    // Movies without a parseable year map to the sentinel year -1.
    // (Restores the originally intended tuple result that had been
    // commented out; the old lambda returned Unit.)
    val pattern = Pattern.compile("(.*)(\\(\\d{4}\\))")
    val movieYear = movies.map(_.split("::"))
      .map(fields => (fields(1), 1)) // (title, 1)
      .map { case (title, count) =>
        val matcher = pattern.matcher(title)
        if (matcher.find()) {
          val parenYear = matcher.group(2) // e.g. "(1995)"
          (parenYear.substring(1, parenYear.length - 1).toInt, count)
        } else {
          (-1, count)
        }
      }

    sc.stop()
  }

}
