package cn.itcast

import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession

/**
 * Author itcast
 * Date 2020/5/3 13:58
 * Desc Demonstrates the Spark 3.0 feature: Dynamic Partition Pruning (DPP)
 */
object DynamicPartitionPruning {

  /**
   * Entry point: builds a local SparkSession, writes two parquet tables
   * partitioned by "k" (a large one and a small one), then runs a join whose
   * filter on the small table lets Spark prune partitions of the large table
   * at runtime (Dynamic Partition Pruning, new in Spark 3.0).
   */
  def main(args: Array[String]): Unit = {
    // 1. Prepare the Spark execution environment.
    // spark.sql.optimizer.dynamicPartitionPruning.enabled defaults to "true";
    // set it to "false" to compare the execution plan without pruning.
    val spark: SparkSession = SparkSession.builder()
      .master("local[*]")
      .appName("DPP")
      .config("spark.sql.optimizer.dynamicPartitionPruning.enabled", "true")
      .getOrCreate()
    val sc: SparkContext = spark.sparkContext
    sc.setLogLevel("WARN")

    // 2. Prepare tables and data: a large table and a small table,
    // both partitioned by column "k" (same pipeline, so share a helper).
    createPartitionedTable(spark, rows = 10000, table = "tab1")
    createPartitionedTable(spark, rows = 100, table = "tab2")

    // 3. Execute the SQL. The predicate `t2.id < 2` on the small table lets the
    // optimizer inject a runtime filter that prunes tab1's partitions.
    // Single definition of the query string so plan and result stay in sync.
    val query = "select * from tab1 t1 join tab2 t2 on t1.k = t2.k and t2.id < 2"
    // Inspect the plan (look for a DynamicPruning/partition-filter subquery).
    spark.sql(query).explain()
    // Inspect the result.
    spark.sql(query).show()

    // 4. Keep the JVM alive so the Spark UI (default http://localhost:4040)
    // can be inspected; terminate the process manually to stop the demo.
    Thread.sleep(Long.MaxValue)
  }

  /**
   * Writes `rows` rows of (id, k = id) as a parquet table named `table`,
   * partitioned by column "k", overwriting any existing table.
   */
  private def createPartitionedTable(spark: SparkSession, rows: Long, table: String): Unit = {
    // Method parameters are stable identifiers, so the implicits import is valid here.
    import spark.implicits._
    spark.range(rows)
      .select($"id", $"id".as("k"))
      .write
      .partitionBy("k")
      .format("parquet")
      .mode("overwrite")
      .saveAsTable(table)
  }
}
