package com.offcn.bigdata.sql.p2

import org.apache.spark.sql.SparkSession

import scala.util.Random

/**
  * A Spark SQL tuning example: dealing with data skew that shows up while
  * executing Spark SQL jobs.
  * References on Spark tuning and data skew:
  *   https://tech.meituan.com/
  *       https://tech.meituan.com/2016/04/29/spark-tuning-basic.html
  *       https://tech.meituan.com/2016/05/12/spark-tuning-pro.html
  *
  *  Data skew produced by a group-by aggregation can be mitigated with
  *  two-stage aggregation (local aggregation + global aggregation).
  *  Using wordcount as the example:
  *     1. First locate the skewed keys, e.g. by sampling with the sample operator.
  *     2. Core idea:
  *         (hello, 1), (hello, 1), (hello, 1), (hello, 1)
  *     Two-stage aggregation = local + global  --> a double group-by:
  *         First scatter the keys by prepending a random prefix, e.g. a random
  *         prefix within 2, i.e. one of (0, 1):
  *         (0_hello, 1), (0_hello, 1), (1_hello, 1), (1_hello, 1)
  *         The original key is now split evenly; aggregate the split data:
  *         (0_hello, 2), (1_hello, 2)   ----> local aggregation result
  *         Strip the prefix:
  *             (hello, 2), (hello, 2)   ----> global aggregation ---> (hello, 4)
  */
object SparkSQLOptimizerOps {

    /**
      * Demonstrates two-stage aggregation (local + global) for a skewed
      * wordcount, step by step: plain wordcount, then random-prefix
      * scattering, then local aggregation, then global aggregation.
      */
    def main(args: Array[String]): Unit = {
        val spark = SparkSession.builder()
                .appName("SparkSQLOptimizerOps")
                .master("local[*]")
                .getOrCreate()

        // Sample data with a deliberately skewed key ("can") so the
        // group-by data-skew problem is reproducible locally.
        val list = List(
            "zhang zhang wen wen wen wen yue yue",
            "gong yi can can can can can can can can can can",
            "chang bao peng can can can can can can"
        )
        import spark.implicits._
        val dataset = spark.createDataset(list).toDF("line")
        dataset.createOrReplaceTempView("wc")

        println("----wordcount-----")
        // Naive wordcount: a single group-by on the raw word. Under skew,
        // the task that receives the hot key becomes the straggler.
        var sql =
            """
              |select
              |  tmp.word,
              |  count(1) count
              |from (
              |  select
              |    explode(split(line, '\\s+')) word
              |  from wc
              |) tmp
              |group by tmp.word
            """.stripMargin
        spark.sql(sql).show()

        println("----------------step 1.-添加随机前缀---------------------")

        // Step 1: scatter each word by prepending a random prefix drawn
        // from [0, 2), e.g. "can" -> "0_can" or "1_can".
        sql =
            """
              |select
              | t1.word,
              | concat_ws("_", cast(floor(rand() * 2) as string), t1.word) prefix_word
              |from (
              |  select
              |    explode(split(line, '\\s+')) word
              |  from wc
              |) t1
            """.stripMargin
        spark.sql(sql).show()

        // Step 2 is the LOCAL aggregation: grouping by the prefixed word
        // splits each hot key across partitions. (Label fixed — it was
        // previously mislabeled as the global stage.)
        println("----------------step 2.-局部聚合---------------------")
        sql =
            """
              |select
              | concat_ws("_", cast(floor(rand() * 2) as string), t1.word) prefix_word,
              | count(1) prefix_count
              |from (
              |  select
              |    explode(split(line, '\\s+')) word
              |  from wc
              |) t1
              |group by prefix_word
            """.stripMargin
        spark.sql(sql).show()

        // Step 3 is the GLOBAL aggregation: strip the random prefix and
        // sum the partial counts back into per-word totals. (Label fixed —
        // it was previously mislabeled as the local stage.)
        println("----------------step 3.-全局聚合---------------------")
        sql =
            """
              |select
              |  substring(t2.prefix_word, instr(t2.prefix_word, "_") + 1) word,
              |  sum(t2.prefix_count) count
              |from (
              |  select
              |   concat_ws("_", cast(floor(rand() * 2) as string), t1.word) prefix_word,
              |   count(1) prefix_count
              |  from (
              |    select
              |      explode(split(line, '\\s+')) word
              |    from wc
              |  ) t1
              |  group by prefix_word
              |) t2
              | group by word
            """.stripMargin
        spark.sql(sql).show()
        spark.stop()
    }

    /**
      * Prepends a random prefix in [0, 2) to `str`, e.g. "hello" -> "1_hello".
      * Uses the shared [[scala.util.Random]] singleton instead of allocating
      * a new generator per call.
      */
    def addRandomPrefix(str: String): String =
        Random.nextInt(2) + "_" + str
}
