package com.shengzai.sql

import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo7MapJoin {
  /**
   * Demo of a map-side (broadcast) join in Spark SQL, shown two ways:
   * via the DataFrame `hint("broadcast")` API and via a SQL `/*+ broadcast(a) */` hint.
   */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("hive")
      .config("spark.sql.shuffle.partitions", 1)
      // Disable automatic broadcast joins (by default Spark broadcasts tables
      // smaller than 10MB) so that only the explicit hints below trigger one.
      .config("spark.sql.autoBroadcastJoinThreshold", -1)
      .getOrCreate()

    // Student table — the "small" side of the join, to be broadcast.
    val studentDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",") // field separator
      .schema("id STRING,name STRING, age INT,sex STRING,clazz STRING") // column names and types
      .load("data/students.txt")

    // Score table — the "large" side of the join.
    val scoreDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING,cId STRING,score DOUBLE")
      .load("data/score.txt")

    /**
     * When joining a large table with a small one, a map join avoids a shuffle:
     * Spark SQL broadcasts the small table to every executor and performs the
     * join on the map side. The broadcast copy lives in executor memory, so the
     * small table should stay modest (roughly 100MB or less) or executors may
     * run out of memory.
     *
     * studentDF.hint("broadcast") marks the small table for broadcasting.
     */
    val joinDF: DataFrame = studentDF.hint("broadcast").join(scoreDF, "id")

    /*    joinDF.write
          .format("csv")
          .option("sep", ",")
          .mode(SaveMode.Overwrite)
          .save("data/join")*/


    studentDF.createOrReplaceTempView("student")
    scoreDF.createOrReplaceTempView("score")

    /**
     * The same map join expressed in SQL: the /*+ broadcast(a) */ hint tells
     * Spark to broadcast the relation aliased as `a`.
     */
    spark.sql(
      """
        |select /*+broadcast(a) */ * from
        |student as a
        |join
        |score as b
        |on a.id=b.id
        |""".stripMargin).show()

    // Keep the driver alive so the Spark UI (localhost:4040) stays inspectable.
    // Sleep instead of busy-spinning: the original `while (true) {}` pinned a
    // CPU core at 100% while doing nothing.
    while (true) {
      Thread.sleep(10000L)
    }

  }
}
