package com.shujia.spark.sql

import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Demo: querying Hive-managed tables through Spark SQL.
 *
 * Shows three ways of working with a Hive table:
 *   1. raw SQL via `spark.sql(...)`,
 *   2. obtaining a DataFrame via `spark.table(...)`,
 *   3. DSL-style aggregations on that DataFrame (with cache/unpersist
 *      around the two reuses).
 *
 * Requires Hive metastore access, so it must be submitted to a cluster
 * (see the spark-submit command at the bottom).
 */
object Demo4SparkOnHive {
  def main(args: Array[String]): Unit = {
    // Build a session with Hive metastore support enabled; a single
    // shuffle partition keeps the demo output small.
    val ss: SparkSession =
      SparkSession
        .builder()
        .appName("dsl")
        .config("spark.sql.shuffle.partitions", 1)
        .enableHiveSupport()
        .getOrCreate()

    import ss.implicits._
    import org.apache.spark.sql.functions._

    // List the tables registered in the Hive metastore.
    val showTablesSql =
      """
        |show tables
        |""".stripMargin
    ss.sql(showTablesSql).show()

    // Plain SQL aggregation over the Hive table.
    val countByClazzSql =
      """
        |select clazz,count(1) as num from
        |students
        |group by clazz
        |""".stripMargin
    ss.sql(countByClazzSql).show()

    // Load the Hive table as a DataFrame for DSL-style queries.
    val students: DataFrame = ss.table("students")

    // The DataFrame is used twice below, so cache it once up front.
    students.cache()

    // Count of students per class.
    students
      .groupBy($"clazz")
      .agg(count($"id") as "num")
      .show()

    // Count of students per gender.
    students
      .groupBy($"sex")
      .agg(count($"id") as "num")
      .show()

    // Done with both aggregations — release the cached data.
    students.unpersist()

    // This job needs the cluster's Hive metastore, so run it via spark-submit:
    //spark-submit --master yarn --deploy-mode client --num-executors 2 --executor-cores 1 --executor-memory 2G --class com.shujia.spark.sql.Demo4SparkOnHive spark-1.0.jar
  }
}
