package com.shujia.spark.opt

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.storage.StorageLevel

/**
 * Demonstrates the three ways to cache data in Spark:
 *   1. RDD API        — rdd.persist(...) / rdd.unpersist()
 *   2. DataFrame DSL  — df.persist(...) / df.unpersist()
 *   3. Spark SQL      — CACHE TABLE / UNCACHE TABLE
 *
 * Caching pays off whenever the same dataset feeds more than one action;
 * without it, each action would re-read and re-compute the lineage from scratch.
 */
object Demo1Cache {
  def main(args: Array[String]): Unit = {
    // Build the Spark SQL entry point (SparkSession is the unified entry point
    // in newer Spark versions, wrapping both SQLContext and HiveContext).
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("cache")
      // Small data set: 1 shuffle partition avoids scheduling many empty tasks.
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    // Obtain the SparkContext for the RDD API.
    val sc: SparkContext = spark.sparkContext

    // Read the raw student data (one CSV-style line per student).
    val studentRDD: RDD[String] = sc.textFile("data/students.txt")

    /**
     * 1. Caching with the RDD API.
     *
     * Common storage levels:
     *   - MEMORY_ONLY:         memory only, deserialized objects.
     *   - MEMORY_AND_DISK_SER: memory with disk spill-over, stored serialized
     *                          (saves space at the cost of CPU; compression is a
     *                          separate setting, spark.rdd.compress).
     */
    studentRDD.persist(StorageLevel.MEMORY_ONLY)

    // Count students per class. Field index 4 is the class column.
    studentRDD
      .map(stu => (stu.split(",")(4), 1))
      .reduceByKey((x, y) => x + y)
      .foreach(println)

    // Count students per age. Field index 2 is the age column.
    // This second action reuses the cached RDD instead of re-reading the file.
    studentRDD
      .map(stu => (stu.split(",")(2), 1))
      .reduceByKey((x, y) => x + y)
      .foreach(println)

    /**
     * Once the RDD is no longer needed but the job keeps running,
     * release its cache explicitly to free executor memory.
     */
    studentRDD.unpersist()

    /**
     * 2. Caching with the DataFrame DSL.
     */
    val studentDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING,name STRING,age INT,sex STRING,clazz STRING")
      .load("data/students.txt")

    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Cache the DataFrame; both aggregations below reuse it.
    studentDF.persist(StorageLevel.MEMORY_ONLY)

    // Student count per class.
    studentDF
      .groupBy($"clazz")
      .agg(count($"clazz") as "num")
      .show()

    // Student count per sex.
    studentDF
      .groupBy($"sex")
      .agg(count($"sex") as "num")
      .show()

    // Release the DataFrame cache.
    studentDF.unpersist()

    /**
     * 3. Caching with SQL.
     */
    studentDF.createOrReplaceTempView("student")

    // CACHE TABLE is eager by default: it materializes the table immediately.
    spark.sql("""cache table student""")

    spark.sql(
      """
        |select clazz,count(1)as num from
        |student
        |group by clazz
        |
        |""".stripMargin).show()

    spark.sql(
      """
        |select age,count(1)as num from
        |student
        |group by age
        |
        |""".stripMargin).show()

    // Drop the cached table when done.
    spark.sql("""uncache table student""")

    // Shut down the SparkSession to release driver/executor resources.
    spark.stop()
  }

}
