package com.shujia.spark.opt

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

import scala.util.Random

object Demo9DoubleReduce {

  /**
   * Demo of two-phase ("salted") aggregation to mitigate data skew.
   *
   * Strategy:
   *   1. Prepend a random salt (0-9) to every key, spreading a hot key
   *      across up to 10 distinct shuffle keys.
   *   2. Aggregate once on the salted keys, so each reducer only sees a
   *      fraction of a hot key's records.
   *   3. Strip the salt and aggregate a second time to get final counts.
   *
   * groupByKey is used here (instead of reduceByKey) as a stand-in for
   * aggregation logic too complex for a simple map-side combiner.
   */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("filterkey")
      .getOrCreate()

    val sc: SparkContext = spark.sparkContext

    val linesRDD: RDD[String] = sc.textFile("data/word")

    // Split comma-separated lines into words, dropping empty tokens.
    val wordsRDD: RDD[String] = linesRDD
      .flatMap(_.split(","))
      .filter(_.nonEmpty)

    val kvRDD: RDD[(String, Int)] = wordsRDD.map((_, 1))

    /**
     * reduceByKey avoids skew via map-side combining, but cannot express
     * arbitrarily complex aggregation logic:
     */
    //val countRDD: RDD[(String, Int)] = kvRDD.reduceByKey(_ + _)

    // Phase 1: salt every key with a random 0-9 prefix so one hot word
    // is spread across up to 10 shuffle keys.
    val saltedRDD: RDD[(String, Int)] = kvRDD.map {
      case (word, one) =>
        val salt: Int = Random.nextInt(10)
        (s"${salt}_$word", one)
    }

    // First aggregation: partial counts per salted key.
    val partialRDD: RDD[(String, Int)] = saltedRDD
      .groupByKey()
      .map {
        case (saltedWord, ones) =>
          (saltedWord, ones.sum)
      }

    // Strip the salt. split with limit = 2 keeps everything after the
    // FIRST underscore intact; the original split("_")(1) silently
    // truncated any word that itself contained an underscore.
    val unsaltedRDD: RDD[(String, Int)] = partialRDD.map {
      case (saltedWord, partial) =>
        val word: String = saltedWord.split("_", 2)(1)
        (word, partial)
    }

    // Second aggregation: merge the (at most 10) partial counts per word.
    val countRDD: RDD[(String, Int)] = unsaltedRDD
      .groupByKey()
      .map {
        case (word, partials) =>
          (word, partials.sum)
      }

    countRDD.foreach(println)

    // Keep the driver (and the Spark UI on localhost:4040) alive for
    // inspection without pegging a CPU core, unlike a bare busy-wait.
    while (true) {
      Thread.sleep(10000)
    }

  }

}
