package org.example

import org.apache.spark.sql.{SparkSession, DataFrame}
import org.apache.spark.sql.functions.desc
import org.apache.spark.sql.types.{DataTypes, IntegerType, StringType, StructField, StructType}
/**
 * MovieLens-style analysis: prints the titles of all movies that were
 * rated 5 by 18-year-old female users.
 *
 * Reads three "::"-separated data files (presumably the ml-1m dataset —
 * verify against the resource files) from src/main/resources.
 */
object yun9 {
  def main(args: Array[String]): Unit = {
    // Create the Spark session in local mode, using all available cores.
    val spark = SparkSession
      .builder()
      .master("local[*]")
      .appName("spark")
      .getOrCreate()

    // Schema for users.dat: UserID::Gender::Age::Occupation::Zip-code.
    val schemaUser = StructType(Seq(
      StructField("id", IntegerType),         // user id (joined against ratings.userId below)
      StructField("gender", StringType),      // "F" / "M"
      StructField("age", IntegerType),
      StructField("occupation", IntegerType),
      StructField("location", StringType)
    ))
    val user = spark.read.option("sep", "::").schema(schemaUser)
      .csv("src/main/resources/users.dat")

    // Schema for ratings.dat: UserID::MovieID::Rating::Timestamp.
    val schemaRating = StructType(Seq(
      StructField("userId", IntegerType),
      StructField("movieId", IntegerType),
      StructField("rating", IntegerType),
      StructField("timestamp", StringType)
    ))
    val rating = spark.read.option("sep", "::").schema(schemaRating)
      .csv("src/main/resources/ratings.dat")

    // Schema for movies.dat: MovieID::Title::Genres.
    val schemaMovie = StructType(Seq(
      StructField("movieId", IntegerType),
      StructField("title", StringType),
      StructField("genres", StringType)
    ))
    val movie = spark.read.option("sep", "::").schema(schemaMovie)
      .csv("src/main/resources/movies.dat")

    // Exercise: titles of all movies that 18-year-old female users rated 5.
    // Users are filtered before the join so the join inputs stay small;
    // distinct() collapses duplicate titles when several such users rated
    // the same movie.
    val result = user
      .filter(user("gender") === "F" && user("age") === 18)
      .join(rating, user("id") === rating("userId"))
      .filter(rating("rating") === 5)
      .join(movie, rating("movieId") === movie("movieId"))
      .select(movie("title"))
      .distinct()

    result.show()

    // Stop the session itself rather than the raw SparkContext: this also
    // stops the underlying context and clears the active-session state.
    spark.stop()
  }
}