package com.shujia.opt

import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

object Demo4MapJoin {

  /**
   * Demonstrates two ways to force a broadcast (map-side) join in Spark SQL:
   *   1. the DataFrame API hint: `df.hint("broadcast")`
   *   2. the SQL hint: `/*+ broadcast(alias) */`
   *
   * Automatic broadcasting is disabled via `spark.sql.autoBroadcastJoinThreshold`
   * so that only the explicit hints trigger the map join.
   */
  def main(args: Array[String]): Unit = {
    // 1. Create the Spark session (local mode, 4 threads).
    val spark: SparkSession = SparkSession
      .builder()
      .appName("fp")
      .master("local[4]")
      // spark.sql.autoBroadcastJoinThreshold: Spark automatically broadcasts any
      // table smaller than this threshold (default: 10MB). -1 is the documented
      // value to disable auto-broadcast, so map join only happens via the
      // explicit hints below.
      .config("spark.sql.autoBroadcastJoinThreshold", -1)
      .getOrCreate()
    import spark.implicits._

    // Student table (the small side — this is what gets broadcast).
    val students: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING,name STRING, age INT,gender STRING, clazz STRING")
      .load("spark/data/students.txt")

    // Score table (the large side).
    val scores: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("sid STRING,cid STRING,score DOUBLE")
      .load("spark/data/score.txt")

    // Map join via the DSL: broadcast the students table to every executor,
    // so the join happens map-side without a shuffle.
    val joinDF: DataFrame = students.hint("broadcast").join(scores, $"id" === $"sid")

    joinDF
      .write
      .format("json")
      .mode(SaveMode.Overwrite)
      .save("spark/data/join")

    students.createOrReplaceTempView("students")
    scores.createOrReplaceTempView("scores")

    // Map join via SQL hint. spark.sql is lazy — without an action the query
    // (and the broadcast join it demonstrates) would never execute, so trigger
    // it with show().
    spark.sql(
      """
        |select /*+broadcast(a) */ * from
        |students as a
        |join
        |scores as b
        |on a.id=b.sid
        |
        |""".stripMargin)
      .show()

    // Keep the application alive so the Spark web UI (localhost:4040) can be
    // inspected. Sleep instead of busy-spinning to avoid pinning a CPU core.
    while (true) {
      Thread.sleep(10000)
    }

  }

}
