package com.shujia.spark.opt

import org.apache.spark.sql.{DataFrame, Dataset, Row, SaveMode, SparkSession}

/**
 * Demonstrates the "sample the skewed keys and split the join" optimization
 * for a big-table-to-big-table join where one side has a skewed key distribution:
 *   1. Sample the large table to find the most frequent (skewed) join keys.
 *   2. Join the skewed rows separately using a broadcast (map-side) join.
 *   3. Join the remaining, evenly distributed rows with a normal shuffle join.
 *   4. Union the two results.
 */
object Demo10DoubleJoin {
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local[8]")
      .appName("join")
      .config("spark.sql.shuffle.partitions", 5)
      // Setting the threshold to -1 DISABLES Spark's automatic broadcast join
      // (by default tables under ~10MB are broadcast automatically), so that the
      // manual skew-handling technique below is actually exercised.
      .config("spark.sql.autoBroadcastJoinThreshold", -1)
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    val studentDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING ,name STRING ,age INT, gender STRING ,clazz STRING")
      .load("data/student")

    val scoreDF: DataFrame = spark
      .read
      .format("csv")
      .option("sep", ",")
      .schema("id STRING ,cid STRING , sco DOUBLE")
      .load("data/score")

    /**
     * When joining a big table with a small table, the small table can be
     * broadcast to perform a map-side join:
     */
    //val joinDF: DataFrame = scoreDF.join(studentDF.hint("broadcast"), "id")

    /**
     * When joining two big tables where one has skewed data:
     * 1. Extract the rows with skewed keys from both tables and map-join them.
     * 2. Join the remaining rows normally.
     * 3. Union the two results.
     */

    // Number of top keys to treat as skewed (sampled below).
    val numSkewedKeys = 4

    // Sample the score table to find the skewed keys.
    // Expected in this dataset: 1500100359,1500100356,1500100357,1500100358
    val ids: Array[String] = scoreDF
      .sample(0.001)                 // sample a fraction of the data
      .groupBy($"id")                // count occurrences per key
      .agg(count($"id") as "num")
      .orderBy($"num".desc)
      .select($"id")
      .take(numSkewedKeys)           // keep only the hottest keys
      .map(row => row.getAs[String]("id"))

    // Rows with skewed keys from the score table (big side).
    val qxScoreDF: DataFrame = scoreDF.where($"id".isInCollection(ids))
    // Matching rows from the student table (small side after filtering).
    val qxStudentDF: DataFrame = studentDF.where($"id".isInCollection(ids))

    // The skewed subset is small on the student side, so broadcast it
    // and perform a map-side join — no shuffle, no skewed partition.
    val qxJoinDF: DataFrame = qxScoreDF.join(qxStudentDF.hint("broadcast"), "id")

    // Rows without skewed keys. Using the type-safe isInCollection predicate
    // instead of an interpolated SQL string avoids quoting problems if an id
    // ever contains special characters, and mirrors the filters above.
    val noQxScoreDF: DataFrame = scoreDF.where(!$"id".isInCollection(ids))
    val noQxStudentDF: DataFrame = studentDF.where(!$"id".isInCollection(ids))

    // The non-skewed data is evenly distributed, so a normal shuffle join is fine.
    val noQxJoinDF: DataFrame = noQxScoreDF.join(noQxStudentDF, "id")

    // Union the skewed and non-skewed join results (same schema on both sides).
    val joinDF: DataFrame = qxJoinDF.union(noQxJoinDF)

    // Save the combined result.
    joinDF.write
      .format("csv")
      .option("sep", ",")
      .mode(SaveMode.Overwrite)
      .save("data/join")

    // Keep the application (and the Spark UI) alive for inspection.
    // Sleep instead of spinning so we don't burn a CPU core.
    while (true) {
      Thread.sleep(10000)
    }
  }

}
