package com.shujia.spark.opt

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Demo of a broadcast (map-side) join in Spark SQL.
 *
 * Reads a small student dimension table and a larger score table, then joins
 * them with a broadcast hint so the join runs without a shuffle.
 */
object Demo6MapJoin {
  def main(args: Array[String]): Unit = {
    // Local SparkSession; shuffle partitions set to 1 since the demo data is tiny.
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("cache")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    // Student table — the small side of the join (will be broadcast).
    val studentDF: DataFrame = spark.read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING,name STRING,age INT,gender STRING,clazz STRING")
      .load("data/students.txt")

    // Score table — the large side of the join.
    val scoreDF: DataFrame = spark.read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING,cid STRING,score DOUBLE")
      .load("data/score.txt")

    /**
     * studentDF.hint("broadcast"): broadcast the small table.
     *
     * When a large table is joined to a small one, the small table can be
     * broadcast so Spark performs a map join: no shuffle is produced, which
     * makes the join much more efficient. The small table should generally
     * stay under roughly 1 GB.
     *
     * A map join produces two jobs:
     * 1. The first job pulls the small table's data to the driver and
     *    broadcasts it from the driver to the executors.
     * 2. The second job performs the actual join.
     */
    val joinDF: DataFrame = scoreDF.join(studentDF.hint("broadcast"), "id")

    joinDF.show()

    // Keep the application alive so the Spark web UI can be inspected.
    // The original `while (true) {}` busy-wait pegged a CPU core at 100%;
    // a blocking sleep keeps the JVM alive without burning CPU.
    Thread.sleep(Long.MaxValue)
  }

}
