package com.shujia.spark.opt

import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.storage.StorageLevel

object Demo1Cache {

  /**
   * Demo: caching in Spark SQL.
   *
   * When the same RDD / DataFrame / table is used multiple times, the data
   * can be cached so repeated actions do not re-read and re-compute it.
   */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("cache")
      // Small demo data set — one shuffle partition is enough.
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()
    import spark.implicits._
    import org.apache.spark.sql.functions._

    val studentDF: DataFrame = spark
      .read
      .format("json")
      .load("data/students.json")

    /**
     * When the same DataFrame is used more than once it can be cached.
     */
    // The default storage level of cache() is MEMORY_AND_DISK.
    //studentDF.cache()
    // Alternatively, choose the persistence level explicitly.
    //studentDF.persist(StorageLevel.MEMORY_ONLY)

    // NOTE: the .show() actions below are commented out, so these two
    // aggregations build lineage only — no Spark job actually runs here.
    studentDF
      .groupBy($"clazz")
      .agg(count($"clazz") as "num")
    // .show()

    studentDF
      .groupBy($"gender")
      .agg(count($"clazz") as "num")
    //.show()

    // Clear the cache and free memory.
    // NOTE: since cache()/persist() above are commented out, this is a no-op;
    // calling unpersist() on an uncached DataFrame is safe but does nothing.
    studentDF.unpersist()


    studentDF.createOrReplaceTempView("student")

    // When the same table is queried multiple times it can be cached via SQL.
    // CACHE TABLE is eager by default: it materializes the table immediately.
    spark.sql("cache table student")

    spark.sql(
      """
        |select clazz,count(1) as num
        |from student
        |group by clazz
        |
        |""".stripMargin).show()

    spark.sql(
      """
        |select gender,count(1) as num
        |from student
        |group by gender
        |
        |""".stripMargin).show()

    // Clear the table cache.
    spark.sql("uncache table student")


    // Keep the driver alive so the Spark UI (http://localhost:4040) can be
    // inspected. Sleep instead of busy-spinning so we don't peg a CPU core.
    while (true) {
      Thread.sleep(10000)
    }
  }

}
