package com.shujia.opt

import org.apache.spark.sql.{DataFrame, SparkSession}

object Code01SparkMapJoin {
  def main(args: Array[String]): Unit = {
    /**
     * 对SparkSQL中使用 广播变量
     *  方式1：dsl => join时给定 hint 语法 指定类型为 broadcast
     *  方式2：SQL => SELECT中可以使用 /* +broadcast(广播的表) */
     * 
     *
     */

    // 需求：将学生数据和成绩数据加载至DF中形成两张表  学生总成绩数据为一个小表 学生数据为大表 对小表进行广播

    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("dsl")
      .config("spark.sql.shuffle.partitions", "3")
      .getOrCreate()

    import org.apache.spark.sql.functions._
    import spark.implicits._

    val stuDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      // 1500100001,施笑槐,22,女,文科六班
      .schema("id String,name String,age int,gender String,clazz String")
      .load("spark_code/data/students.txt")


    val scoreDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      // 1500100001,施笑槐,22,女,文科六班
      .schema("id String,courseID String,score int")
      .load("spark_code/data/score.txt")


    val totalScoreDF: DataFrame = scoreDF.groupBy($"id").agg(sum($"score") as "totalScore")


//    stuDF.createTempView("stuInfo")
//    totalScoreDF.createTempView("total_score_tbl")
//
//
//    spark.sql(
//      """
//        |
//        |SELECT
//        |/* +broadcast(total_score_tbl) */
//        |T1.*
//        |,T2.totalScore
//        |FROM stuInfo T1 JOIN total_score_tbl T2 ON T1.id = T2.id
//        |
//        |""".stripMargin).show()


    stuDF
      .join(totalScoreDF.hint("broadcast"),stuDF("id") === totalScoreDF("id"),"inner")
      .show()


  while (true){}

  }
}
