package com.shujia.spark.sql

import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

/**
  * Demo of map-side (broadcast) joins in Spark SQL.
  *
  * Joins a score table against a student table twice:
  *   1. via the DataFrame API, broadcasting the smaller side with `.hint("broadcast")`
  *   2. via a SQL query, broadcasting with the `/*+ broadcast(a) */` hint
  *
  * The process then blocks forever so the Spark web UI (http://localhost:4040)
  * stays available for inspecting the broadcast-join plan.
  */
object Demo9MapJoin {
  def main(args: Array[String]): Unit = {

    // Local-mode session; shuffle partitions lowered to 1 since the demo data is tiny.
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("burk")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()
    // Needed for the $"col" column syntax used in the join condition below.
    import spark.implicits._

    // Score fact table: student id, course id, score.
    val score: DataFrame = spark.read
      .format("csv")
      .option("sep", ",")
      .schema("sid STRING , cId STRING,sco INT")
      .load("data/score.txt")

    // Student dimension table.
    val student: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING, name STRING , age INT , gender  STRING , clazz STRING")
      .load("data/students.txt")

    /**
      * Map join: Spark estimates the size of both sides and may pick a
      * broadcast join automatically (spark.sql.autoBroadcastJoinThreshold).
      * `.hint("broadcast")` forces the hinted DataFrame to be broadcast to
      * every executor, avoiding a shuffle of the larger side.
      */
    val joinDF: DataFrame = student.join(score.hint("broadcast"), $"id" === $"sid")

    joinDF
      .write
      .format("csv")
      .mode(SaveMode.Overwrite)
      .save("data/join")

    // Register temp views so the same join can be expressed in SQL.
    score.createOrReplaceTempView("score")
    student.createOrReplaceTempView("student")

    // Broadcast a table in SQL via an optimizer hint to achieve a map join.
    spark.sql(
      """
        |
        |select /*+ broadcast(a)  */  * from
        |student as a
        |join score as b
        |on a.id=b.sid
        |
      """.stripMargin)
      .show()

    // Keep the driver alive so the Spark UI can be inspected.
    // Sleep instead of spinning so we don't burn a CPU core while waiting.
    while (true) {
      Thread.sleep(60000L)
    }

  }
}
