package com.offcn.bigdata.spark.sql.p3

import org.apache.spark.sql.SparkSession

/**
 * @Author: BigData-LGW
 * @ClassName: SparkDataSkew
 * @Date: 2020/12/10 22:45
 * @Description: Demonstrates mitigating data skew in a Spark SQL word count by
 *               two-phase aggregation: salt keys with a random prefix, aggregate
 *               locally, strip the salt, then aggregate globally.
 * @Version: 1.0
 */
object SparkDataSkew {

    /** Prints a section label, runs `query` against `spark`, and shows the result. */
    private def runAndShow(spark: SparkSession, label: String, query: String): Unit = {
        println(label)
        spark.sql(query).show()
    }

    def main(args: Array[String]): Unit = {
        val spark = SparkSession.builder()
            .appName("SparkDataSkew")
            .master("local[*]")
            .getOrCreate()
        // Brings the implicit String Encoder required by createDataset into scope.
        import spark.implicits._

        // Deliberately skewed input: "can" occurs far more often than any other
        // word, so its key would dominate a single shuffle partition.
        val list = List(
            "zhang zhang wen wen wen wen yue yue",
            "gong yi can can can can can can can can can can",
            "chang bao peng can can can can can can"
        )
        val df = spark.createDataset(list).toDF("line")
        df.createOrReplaceTempView("wc")

        // Baseline word count: a single group-by on the raw word, so the hot
        // key ("can") is shuffled to one partition — the data-skew case.
        val plainWordCount =
            """
              |	select
              |		tmp.word,
              |		count(tmp.word) counts
              |	from (
              |		select
              |			explode(split(line, "\\s+")) word
              |		from wc
              |	) tmp
              |	group by tmp.word
              |""".stripMargin
        runAndShow(spark, "-----未处理数据倾斜之前的wordcount的版本-----------", plainWordCount)

        println("-----开始处理数据倾斜之前的wordcount的版本-----------")

        // Step 1.1: explode each line into one row per word.
        val explodedWords =
            """
              |select
              |explode(split(line,"\\s+")) word
              |from wc
              |""".stripMargin
        runAndShow(spark, "----step.1.1 开始对数据及进行拆分---------", explodedWords)

        // Step 1.2: salt each word with a random integer prefix in [0, 2), so a
        // hot key is spread across up to 2 distinct salted keys.
        val saltedWords =
            """
              |select
              |  t1.word,
              |  concat_ws("_", cast(floor(rand() * 2) as string), t1.word) prefix_word
              |from (
              |  select
              |    explode(split(line, "\\s+")) word
              |  from wc
              |) t1
              |""".stripMargin
        runAndShow(spark, "----step.1.2 开始对数据及进行拆分之添加N以内的随机前缀---------", saltedWords)

        // Step 2: local (partial) aggregation keyed by the salted word.
        // NOTE(review): grouping by an alias of a rand()-based expression is
        // version-sensitive in Spark SQL (nondeterministic expressions in
        // grouping may be rejected) — confirm on the target Spark version.
        val localAgg =
            """
              |select
              |concat_ws("_",cast(floor(rand() * 2) as string),t1.word) prefix_word,
              |count(1) counts
              |from (
              |select
              |explode(split(line,"\\s+")) word
              |from wc
              |) t1
              |group by prefix_word
              |""".stripMargin
        runAndShow(spark, "----step.2 基于随机前缀的局部聚合---------", localAgg)

        // Step 3: strip the "<salt>_" prefix (everything up to and including the
        // first "_") and sum the partial counts for the final, skew-free result.
        val globalAgg =
            """
              |select
              |substr(t2.prefix_word,instr(t2.prefix_word, "_") + 1) word,
              |sum(t2.counts) counts
              |from (
              |  select
              |    concat_ws("_", cast(floor(rand() * 2) as string), t1.word) prefix_word,
              |    count(1) counts
              |  from (
              |    select
              |      explode(split(line, "\\s+")) word
              |    from wc
              |  ) t1
              |  group by prefix_word
              |) t2
              |group by word
              |""".stripMargin
        runAndShow(spark, "----step.3 在局部聚合的基础上去掉随机前缀进行全局聚合---------", globalAgg)

        spark.stop()
    }
}
