package dataframe

import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.functions._

object DataFrame_FinalDemo02 {

  /**
   * Loads MovieLens-style users/ratings data ("::"-delimited text files) and,
   * for one movie, shows how many people rated it broken down by age group,
   * with gender pivoted into columns.
   *
   * @param args optional first argument: the movie ID to analyse
   *             (defaults to 2116, the original hard-coded value)
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setMaster("local[*]")
    conf.setAppName("DataFrame_FinalDemo02")

    val spark: SparkSession = SparkSession
      .builder()
      .config(conf)
      .getOrCreate()
    import spark.implicits._

    try {
      // users.dat layout: userID::gender::age::occupation::zipcode
      val usersDF: DataFrame = spark
        .sparkContext
        .textFile("data/users.dat")
        .map { line =>
          val fields = line.split("::")
          User(fields(0).trim.toInt, fields(1).trim, fields(2).trim.toInt, fields(3).trim, fields(4).trim)
        }
        .toDF()
      usersDF.printSchema()
      usersDF.show(5)

      // ratings.dat layout: userID::movieID::rating::timestamp
      val ratingsDF: DataFrame = spark
        .sparkContext
        .textFile("data/ratings.dat")
        .map { line =>
          val fields = line.split("::")
          Rating(fields(0).trim.toInt, fields(1).trim.toInt, fields(2).trim.toInt, fields(3).trim.toLong)
        }
        .toDF()
      ratingsDF.printSchema()
      ratingsDF.show(5)

      usersDF.createOrReplaceTempView("tb_users")
      ratingsDF.createOrReplaceTempView("tb_ratings")

      // The movie ID may be supplied as the first command-line argument;
      // fall back to the original hard-coded value if absent or non-numeric.
      val movieID: Int = args.headOption
        .flatMap(a => scala.util.Try(a.trim.toInt).toOption)
        .getOrElse(2116)

      // Count raters per (age, gender) for the chosen movie, then pivot gender
      // values into columns. The original query also carried an ORDER BY, but
      // any row ordering is discarded by the groupBy/pivot that follows, so
      // the sort is applied *after* the pivot where it actually takes effect.
      spark.sql(
        s"""
          |select age, gender, count(*) as total_people
          |from tb_ratings r join tb_users u on r.userID = u.userID
          |where movieID = $movieID
          |group by age, gender
          |""".stripMargin)
        .groupBy("age")
        .pivot("gender")
        .agg(
          sum("total_people").as("total_people")
        )
        .orderBy("age")
        .show()
    } finally {
      // Always release the local SparkSession, even if a job above fails.
      spark.stop()
    }
  }

  // One record of users.dat (userID::gender::age::occupation::zipcode).
  // Primitive Int (vs boxed java.lang.Integer) avoids boxing; the derived
  // DataFrame columns become non-nullable.
  final case class User(userID: Int, gender: String, age: Int, occupation: String, zipcode: String)

  // One record of ratings.dat (userID::movieID::rating::timestamp).
  final case class Rating(userID: Int, movieID: Int, rating: Int, timestamp: Long)
}
