package com.study.spark.ml.movie

import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkContext
import org.apache.spark.sql.types.{IntegerType, LongType, StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
  *
  * @author: stephen.shen
  * @create: 2019-04-09 17:46
  */
object Util {

  import scala.util.control.NonFatal

  /** Root directory of the MovieLens (ml-100k) data set on the local filesystem. */
  val PATH = "D:\\CodeDir\\BigdataWork\\bigdata-study\\study-spark-ml-book"
  /** Movie metadata file (pipe-delimited u.item). */
  val PATH_MOVIES = s"${PATH}\\data\\u.item"
  /** User demographics file (pipe-delimited u.user). */
  val PATH_USERS = s"${PATH}\\data\\u.user"
  /** Ratings file (tab-delimited u.data). */
  val PATH_RATING = s"${PATH}\\data\\u.data"

  // Single local SparkSession shared by all helpers in this object.
  val spark: SparkSession = SparkSession.builder()
    .appName("Spark demo")
    .master("local")
    .getOrCreate()

  val sc: SparkContext = spark.sparkContext

  /**
    * Entry point: quiets Spark logging, runs one of the demo printers and
    * shuts the session down afterwards.
    */
  def main(args: Array[String]): Unit = {
    Logger.getLogger("org").setLevel(Level.WARN)
    try {
      //printMovieInfo()
      //printUserInfo()
      printRatingInfo()
    } finally {
      // Release the local Spark resources even if a printer throws.
      spark.stop()
    }
  }

  /** Prints the first movie record and the total number of movie records. */
  def printMovieInfo(): Unit = {
    val df = getMovieDataDF()
    df.cache()
    println("First Record: " + df.first())
    println("Total Records: " + df.count())
  }

  /**
    * Prints summary statistics over the user data: first row, row count,
    * distinct gender count, and per-occupation / per-zipCode counts.
    */
  def printUserInfo(): Unit = {
    val df = getUserDataDF()
    df.cache()
    println("First User: " + df.first())
    println("Total Users: " + df.count())
    println("Total Genders: " + df.groupBy("gender").count().count())
    println("Occupations: " + df.groupBy("occupation").count().collect().toBuffer)
    println("ZipCodes: " + df.groupBy("zipCode").count().collect().toBuffer)
  }

  /**
    * Prints summary statistics over the rating data: first row, row count,
    * per-movie average rating and rating count (sorted by average, descending),
    * and the global max / min rating.
    */
  def printRatingInfo(): Unit = {
    val df = getRatingDataDF()
    df.cache()
    println("First Record: " + df.first())
    println("Total Records: " + df.count())
    import org.apache.spark.sql.functions._
    // Average rating and number of ratings for every movie.
    df.select("movie_id", "rating")
      .groupBy("movie_id")
      .agg(avg("rating").as("rating_avg"), count("rating").as("rating_count"))
      .orderBy(desc("rating_avg"))
      .show()
    // Highest rating across all movies.
    df.select("rating").agg(max("rating")).show()
    // Lowest rating across all movies.
    df.select("rating").agg(min("rating")).show()
  }

  /**
    * Loads the movie data as a DataFrame and registers it as temp view
    * "movie_data".
    *
    * NOTE(review): u.item actually contains more fields than the five declared
    * here (genre flags follow the URL); the extra columns are assumed to be
    * ignored by the reader — confirm against the data file. The column named
    * "null" mirrors the unused video-release-date field and is kept for
    * compatibility with existing consumers of the view.
    *
    * @return movie DataFrame with columns id, name, date, null, url (all strings)
    */
  def getMovieDataDF(): DataFrame = {
    val customSchema = StructType(Array(
      StructField("id", StringType, true),
      StructField("name", StringType, true),
      StructField("date", StringType, true),
      StructField("null", StringType, true),
      StructField("url", StringType, true)
    ))
    // Built-in CSV source (Spark >= 2.0) replaces the legacy
    // "com.databricks.spark.csv" external package name.
    val df = spark.read.format("csv")
      .option("delimiter", "|")
      .schema(customSchema)
      .load(PATH_MOVIES)
    // Register temp view for SQL access.
    df.createOrReplaceTempView("movie_data")
    df
  }

  /**
    * Loads the user data as a DataFrame and registers it as temp view
    * "user_data".
    *
    * @return user DataFrame with columns id, age (int), gender, occupation, zipCode
    */
  def getUserDataDF(): DataFrame = {
    val customSchema = StructType(Array(
      StructField("id", IntegerType, true),
      StructField("age", IntegerType, true),
      StructField("gender", StringType, true),
      StructField("occupation", StringType, true),
      StructField("zipCode", StringType, true)
    ))
    val df = spark.read.format("csv")
      .option("delimiter", "|")
      .schema(customSchema)
      .load(PATH_USERS)
    // Register temp view for SQL access.
    df.createOrReplaceTempView("user_data")
    df
  }

  /**
    * Loads the rating data as a DataFrame and registers it as temp view
    * "rating_data".
    *
    * @return rating DataFrame with columns user_id, movie_id, rating (ints)
    *         and timestamp (long)
    */
  def getRatingDataDF(): DataFrame = {
    val customSchema = StructType(Array(
      StructField("user_id", IntegerType, true),
      StructField("movie_id", IntegerType, true),
      StructField("rating", IntegerType, true),
      StructField("timestamp", LongType, true)
    ))
    val df = spark.read.format("csv")
      .option("delimiter", "\t")
      .schema(customSchema)
      .load(PATH_RATING)
    // Register temp view for SQL access.
    df.createOrReplaceTempView("rating_data")
    df
  }

  /**
    * Extracts the four-digit year from the tail of a date string such as
    * "01-Jan-1995".
    *
    * @param x date string whose last four characters are expected to be the year
    * @return the parsed year, or 1900 when the input cannot be parsed
    */
  def convertYear(x: String): Int =
    // The try/catch is an expression: no `return` needed, and NonFatal
    // lets fatal errors (OOM, interrupts) propagate instead of being swallowed.
    try x.takeRight(4).toInt
    catch {
      case NonFatal(e) =>
        println("exception caught: " + e + " Returning 1900")
        1900
    }
}
