package com.gjy.learning.scala

/**
 * Equivalent resource flags when submitting via spark-submit
 * (these take precedence over the builder settings below):
 * --driver-cores 2   \
 * --driver-memory 8g \
 * --executor-cores 4 \
 * --num-executors 10 \
 * --executor-memory 8g \
 */

import org.apache.spark.broadcast.Broadcast
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.broadcast

object ConfigurationSet {

  /**
   * Demonstrates setting resource properties on a SparkSession and performing
   * a broadcast join of a small dimension table against a larger fact table.
   *
   * NOTE: in `local[*]` mode the executor-related properties below have no
   * effect (there are no separate executors); they are shown for reference.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("ConfigurationSet")
      .master("local[*]")
      // Spark only recognizes the "spark.*" property keys; the bare names
      // previously used ("driver-cores", "num-executors", ...) are stored
      // but silently ignored, so the settings never took effect.
      .config("spark.driver.cores", "1")        // default: 1
      .config("spark.driver.memory", "1g")      // default: 1g (must be set before the driver JVM starts)
      .config("spark.executor.cores", "1")      // default: 1 (ignored in local mode)
      .config("spark.executor.instances", "2")  // number of executors (ignored in local mode)
      .config("spark.executor.memory", "1g")    // default: 1g (ignored in local mode)
      .getOrCreate()

    // Small (dimension) dataset — the broadcast candidate.
    val smallData = Seq(
      (1, "Alice"),
      (2, "Bob"),
      (3, "Cathy")
    )
    val smallDF = spark.createDataFrame(smallData).toDF("id", "name")

    // Larger (fact) dataset to join against.
    val largeData = Seq(
      (1, 100),
      (2, 200),
      (3, 300),
      (4, 400)
    )
    val largeDf = spark.createDataFrame(largeData).toDF("id", "value")

    // Broadcast join: the broadcast() hint tells Spark to ship smallDF to
    // every executor, avoiding a shuffle of largeDf. This replaces the
    // previous collect-then-recreate round trip, which pulled the small
    // DataFrame to the driver only to redistribute it as a new DataFrame —
    // defeating the purpose of broadcasting.
    val joinedDf = largeDf.join(broadcast(smallDF), "id")

    // Display the joined result.
    joinedDf.show()

    // Release cluster resources.
    spark.stop()
  }
}
