package com.shengzai.opt

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.storage.StorageLevel

object Demo1Cache {

  /**
   * Demonstrates the three ways of caching the same dataset in Spark:
   *   1. RDD-level persist/unpersist
   *   2. DataFrame-level persist/unpersist (DSL)
   *   3. SQL `CACHE TABLE` / `UNCACHE TABLE`
   *
   * Note that persist() is lazy: the cache is only populated the first time
   * an ACTION runs on the persisted dataset. Unpersisting before any action
   * makes the cache useless, so every section below triggers its jobs
   * before releasing the cache.
   */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("cache")
      .getOrCreate()

    val sc: SparkContext = spark.sparkContext

    val stuRDD: RDD[String] = sc.textFile("data/students.txt")

    // 1. Caching at the RDD level.
    stuRDD.persist(StorageLevel.MEMORY_ONLY)

    // Count the number of students per class (class name is the last CSV field).
    val clazzCount: RDD[(String, Int)] = stuRDD.map(line => {
      (line.split(",").last, 1)
    }).reduceByKey((x, y) => x + y)

    // Count the number of students per age (age is the third CSV field).
    val ageCount: RDD[(String, Int)] = stuRDD.map(
      line => {
        (line.split(",")(2), 1)
      }
    ).reduceByKey((x, y) => x + y)

    // BUG FIX: the original code unpersisted before running any action,
    // so the cache was dropped before it was ever filled or reused.
    // Run both jobs first: the first action materializes the cache,
    // the second one reads stuRDD straight from memory.
    clazzCount.foreach(println)
    ageCount.foreach(println)

    // Release the cache only after every job that needs it has run.
    stuRDD.unpersist()

    // 2. Caching at the DataFrame level (DSL).

    val stuDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("sid string,name string,age int,sex string,clazz string")
      .load("data/students.txt")
    import org.apache.spark.sql.functions._
    import spark.implicits._

    stuDF.persist(StorageLevel.MEMORY_ONLY)

    val clazzDF: DataFrame = stuDF.groupBy($"clazz")
      .agg(count($"clazz") as "num")

    val ageDF: DataFrame = stuDF.groupBy($"age")
      .agg(count($"age") as "num")

    // BUG FIX: show() is an action — it must run BEFORE unpersist(),
    // otherwise the DataFrame cache is never used (the originals were
    // commented out, making the persist a no-op).
    clazzDF.show()
    ageDF.show()
    stuDF.unpersist()

    // 3. Caching through SQL.

    stuDF.createOrReplaceTempView("student")

    // CACHE TABLE is eager by default: it materializes the table immediately.
    spark.sql("""cache table student""")

    spark.sql(
      """
        |select clazz,count(*)
        |from
        |student group by clazz
        |
        |""".stripMargin).show()

    spark.sql(
      """
        |select age,count(*)
        |from
        |student group by age
        |
        |""".stripMargin).show()

    spark.sql("""uncache table student""")

    // Keep the driver alive so the Spark Web UI (http://localhost:4040)
    // can be inspected. BUG FIX: a `while (true) {}` busy-wait pegged one
    // CPU core; sleeping blocks the thread without spinning.
    Thread.sleep(Long.MaxValue)

  }

}
