package com.shujia.spark.sql

import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.storage.StorageLevel

object Demo12Cache {

  /**
   * Demonstrates caching a DataFrame that is reused by multiple actions.
   *
   * The student table is read once, cached with [[StorageLevel.MEMORY_ONLY]],
   * then aggregated twice (by class, then by sex). Without the cache each
   * `show()` would re-read and re-parse the source file; with it, the second
   * action reuses the in-memory data.
   *
   * The program deliberately never exits so the Spark web UI (default
   * http://localhost:4040) can be inspected.
   */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("hive")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()
    import spark.implicits._
    import org.apache.spark.sql.functions._

    val studentDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING,name STRING, age INT,sex STRING,clazz STRING")
      .load("data/students.txt")

    /**
     * When the same table is used by several actions, cache it so the source
     * is only scanned once. Note that persist() is lazy: the data is actually
     * materialized in memory by the first action below.
     */
    studentDF.persist(StorageLevel.MEMORY_ONLY)

    // First action: count students per class (materializes the cache).
    studentDF
      .groupBy($"clazz")
      .agg(count($"clazz") as "num")
      .show()

    // Second action: count students per sex (served from the cache).
    studentDF
      .groupBy($"sex")
      .agg(count($"sex") as "num")
      .show()

    /**
     * Drop the cached blocks and release the memory.
     */
    studentDF.unpersist()

    // Block forever without busy-spinning (the original `while (true) {}`
    // pinned a CPU core at 100%). Joining the current thread never returns,
    // keeping the JVM — and the Spark UI — alive for inspection.
    Thread.currentThread().join()

    /**
     * Equivalent SQL for caching a registered table:
     *   cache table student;
     * and for releasing it:
     *   uncache table student;
     */
  }
}
