package com.shujia.sql

import org.apache.spark.sql.{DataFrame, SparkSession}

/**
  * Demonstrates a map-side (broadcast) join in Spark SQL.
  *
  * The small table (students) is broadcast to every executor, so the join
  * is performed locally on each partition of the large table (scores)
  * without a shuffle. The broadcast table must be small enough to fit in
  * driver/executor memory.
  */
object Demo13DFMapjoin {
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .appName("sql")
      .master("local[4]")
      // Keep shuffle parallelism low for this small local demo.
      .config("spark.sql.shuffle.partitions", 2)
      .getOrCreate()

    import spark.implicits._

    // Large side of the join: one row per (student, course) score.
    val scoreDF: DataFrame = spark.read
      .option("sep", ",")
      .schema("s_id STRING,c_id STRING,sco INT")
      .csv("spark/data/score.txt")

    // Small side of the join: one row per student.
    val studentDF: DataFrame = spark.read
      .option("sep", ",")
      .schema("id STRING,name STRING,age INT,gender STRING,clazz STRING")
      .csv("spark/data/students.txt")

    // Cartesian product (for comparison; intentionally disabled).
    //scoreDF.crossJoin(studentDF).show()

    // Mark the DataFrame for caching.
    // NOTE(review): this cache is never materialized before the broadcast
    // below and adds nothing to this flow — consider removing it.
    studentDF.cache()

    /**
      * Map join: load the small table into memory and broadcast it to
      * every executor, so the join happens map-side with no shuffle.
      *
      * The broadcast DataFrame must not be too large, or the driver and
      * executors will run out of memory.
      */
    import org.apache.spark.sql.functions._

    broadcast(studentDF) // broadcast the small table
      .join(scoreDF, $"id" === $"s_id")
      .show()

    // Keep the application alive so the Spark web UI stays reachable.
    // Sleep instead of spinning so we don't burn a CPU core busy-waiting.
    while (true) {
      Thread.sleep(10000)
    }

  }
}
