package chapter04

import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.{col, desc}

/**
 * Demo walking through common Spark SQL DataFrame operations
 * (fetching rows, filtering, selecting, UDFs, sorting, grouping, joining)
 * over the MovieLens `users.dat` / `ratings.dat` files in `input/`.
 */
object Test05_Ratings {
  def main(args: Array[String]): Unit = {
    // Keep Spark's own logging quiet so the demo output is readable.
    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("Ratings")
      .getOrCreate()
    import spark.implicits._
    // Read users.dat ("::"-delimited records: id::gender::age::occupation::zip).
    val sc = spark.sparkContext
    val value = sc.textFile("input/users.dat")
    println(value.take(1).toList)
    val df = value.map(e => e.split("::"))
      .map(e => (e(0).toInt, e(1), e(2).toInt, e(3).toInt, e(4)))
      .toDF("userId", "gender", "age", "occupation", "zip")
    // --- Ways to fetch data ---
    // show displays rows; the optional argument is the row count (default 20).
    df.show()
    df.show(5)
    // head returns the first n rows as an Array[Row].
    val row = df.head(2)
    println(row.toList)
    // take also returns the first n rows as an array.
    val rows = df.take(1)
    println(rows.toList)
    // collect pulls the entire DataFrame back to the driver.
    val rows1 = df.collect()
    println(rows1.take(2).toList)
    // --- Filtering: where and filter are synonyms ---
    df.where("gender='F' and age=18").show
    df.filter("gender='F' and age=18").show
    df.where("age between 18 and 25").show
    // select with multiple column names
    val df1 = df.select("userId", "age")
    df1.show
    // User-defined function (UDF): encode gender as an Int.
    // BUG FIX: the original match was non-exhaustive and threw a MatchError
    // on any value other than "M"/"F"; map unexpected input to -1 instead.
    spark.udf.register("replace", (x: String) => {
      x match {
        case "M" => 0
        case "F" => 1
        case _   => -1 // defensive default for malformed records
      }
    })
    val df2 = df.selectExpr("userId",
      "replace(gender) as sex", "age")
    df2.show(5)
    // Obtain a single Column object ...
    val column = df.col("userId")
    // ... select it ...
    df.select(column).show(5)
    // ... or use it in a predicate.
    df.filter(column < 5).show
    df.filter(column === 5).show
    // limit returns the first n rows as a new DataFrame (not an array).
    val value1 = df.limit(1)
    value1.show
    // Sorting: orderBy / sort — three equivalent ways to sort descending.
    df.sort($"userId".desc).show(5)
    df.sort(desc("userId")).show(5)
    df.sort(col("userId").desc).show(5)
    // Grouping and aggregation
    df.groupBy("gender").count.show
    df.groupBy("gender").max("age").show
    df.groupBy("gender").agg(Map(
      "userId" -> "count",
      "age" -> "max"
    )).show
    // Join users with ratings on the shared userId column.
    val value2 = sc.textFile("input/ratings.dat")
      .map(e => e.split("::"))
    val df3 = value2.map(e =>
        (e(0).toInt, e(1).toInt, e(2).toInt, e(3).toLong))
      .toDF("userId", "movieId", "rating", "timestamp")
    df.join(df3, "userId").show
    spark.stop()
  }
}
