package com.study.spark.scala.dataframe

import org.apache.spark.sql.SparkSession

/**
  * Broadcast Hash Join
  *
  * @author stephen
  * @create 2019-03-17 19:07
  * @since 1.0.0
  */
object BroadcastHashJoinDemo {

  /**
    * Entry point: builds two small in-memory DataFrames, joins them with an
    * explicit `broadcast()` hint, and prints the physical plan so the
    * BroadcastHashJoin operator can be observed, followed by the joined rows.
    *
    * @param args command-line arguments (unused)
    */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[2]")
      .appName("Broadcast Hash Join Demo")
      .getOrCreate()

    try {
      // Maximum size of a table eligible for auto-broadcast; default is 10 MB.
      // NOTE(review): with an explicit broadcast() hint below, this threshold
      // is not strictly required — kept to illustrate the related config.
      spark.conf.set("spark.sql.autoBroadcastJoinThreshold", 1024 * 1024 * 10)

      import org.apache.spark.sql.functions._
      import spark.implicits._

      val df1 = Seq((1, "xiaowang"), (2, "xiaozhang"), (3, "xiaoli")).toDF("id", "name")

      val df2 = Seq((1, "beijing"), (2, "wuhan"), (3, "shanghai")).toDF("uid", "city")

      //df2.cache()
      // Force a Broadcast Hash Join by hinting that df2 should be broadcast.
      val result = df1.join(broadcast(df2), $"id" === $"uid").select("id", "name", "city")

      // Inspect the physical plan — it should contain BroadcastHashJoin.
      result.explain()
      result.show()
    } finally {
      // Always release the session, even if the job above throws.
      spark.stop()
    }
  }
}
