package org.example

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.desc
import org.apache.spark.sql.types.{DoubleType, IntegerType, LongType, StringType, StructField, StructType}

object data1_SQL3 {
  /**
   * Spark SQL exercise on the MovieLens "::"-separated data files.
   *
   * Reads the users and ratings tables with explicit schemas, joins them on
   * the user id, and prints the ids of all movies that 18-year-old women
   * rated 5.0. Earlier exploration snippets (UDF, sort, group-by) are kept
   * as commented reference material.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder
      .master("local[*]")
      .appName("spark")
      .getOrCreate()

    // Read the movie-user table (users.dat: id::gender::age::occupation::zipcode).
    val schemaUser = StructType(Seq(
      StructField("id", IntegerType),
      StructField("gender", StringType),
      StructField("age", IntegerType),
      StructField("occupation", IntegerType),
      StructField("zipcode", StringType)
    ))
    val user = spark.read.option("sep", "::").schema(schemaUser)
      .csv("src/main/resources/users.dat")

//  Gender-replacement UDF (reference only):
//    spark.udf.register("replace", (x: String) => {
//      x match {
//        case "M" => 0  // was "N" — MovieLens encodes male as "M"; "N" would throw a MatchError
//        case "F" => 1
//      }
//    })
//    user.selectExpr("id", "replace(gender) as sexual", "age").show(3)
//    user.select(user.col("zipcode")).collect().foreach(println)
//  2. Sorting
//    user.orderBy(-user("id")).show(5)
//    user.sort(desc("id")).show(5)
//  3. Grouping
//    user.groupBy("gender").count().show()
//  4. Joining: join(DataFrame, "columnName")

//  Exercise: read the ratings table, join it with the users table, and find
//  the ids of all movies that 18-year-old women rated 5.
    // ratings.dat: userId::movieId::rating::timestamp.
    // The first column is named "id" on purpose so the join key matches user("id").
    val schemaRating = StructType(Seq(
      StructField("id", IntegerType),
      StructField("movieId", IntegerType),   // fixed typo: was "moviedId"
      StructField("rating", DoubleType),
      StructField("timestamp", LongType)     // fixed typo: was "timestapp"
    ))
    val rating = spark.read.option("sep", "::").schema(schemaRating)
      .csv("src/main/resources/ratings.dat")

    // Join on the shared "id" column (Seq form avoids a duplicate id column),
    // then filter: female, age 18, rating exactly 5.0.
    val joined = user.join(rating, Seq("id"))
    joined
      .filter(joined("gender") === "F" && joined("age") === 18 && joined("rating") === 5.0)
      .select("movieId")
      .distinct()
      .show()

    // spark.stop() also stops the underlying SparkContext, so no separate sc handle is needed.
    spark.stop()
  }

}
