package com.bigdata

import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable.ListBuffer
import scala.util.Random

object CaiYangChaiFen {

  /**
   * Demonstrates the "sample → split → salt → join" technique for mitigating
   * data skew in a Spark join:
   *   1. sample rdd1 to find the most frequent (skewed) key,
   *   2. split both RDDs into a skewed part and a non-skewed part,
   *   3. join the non-skewed parts directly,
   *   4. salt the skewed keys with a random prefix (and expand the small side
   *      with every prefix), join, then strip the prefix,
   *   5. union the two results.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setMaster("local")
    conf.setAppName("test")
    val sc = new SparkContext(conf)

    // Number of salt buckets: Random.nextInt(numSalts) on the big side must
    // match the prefixes 0 until numSalts generated on the small side, so the
    // value is defined once to keep both sides in sync.
    val numSalts = 4

    val rdd1 = sc.parallelize(Array[(String, Int)](
      ("zhangsan", 18),
      ("zhangsan", 19),
      ("zhangsan", 20),
      ("zhangsan", 21),
      ("lisi", 22),
      ("wangwu", 23)
    ))
    val rdd2 = sc.parallelize(Array[(String, Int)](
      ("zhangsan", 100),
      ("zhangsan", 200),
      ("lisi", 300),
      ("wangwu", 400)
    ))

    // Sample rdd1 and take the most frequent key as the skewed key.
    // BUGFIX: `first()` returns a (String, Int) tuple; the original code
    // compared that whole tuple against the String key in the filters below,
    // which is always false — the skewed branch was dead code. Extract `._1`.
    // NOTE(review): with fraction 0.5 the sample can be empty and `first()`
    // would then throw — acceptable for this demo, but worth guarding in
    // production code.
    val skewedKey: String = rdd1.sample(withReplacement = false, 0.5)
      .map(one => (one._1, 1))
      .reduceByKey(_ + _)
      .sortBy(_._2, ascending = false)
      .first()
      ._1

    // RDD partitions holding the skewed key.
    val rdd1_A = rdd1.filter(one => skewedKey.equals(one._1))
    // Everything else (not skewed).
    val rdd1_B = rdd1.filter(one => !skewedKey.equals(one._1))

    val rdd2_A = rdd2.filter(one => skewedKey.equals(one._1))
    val rdd2_B = rdd2.filter(one => !skewedKey.equals(one._1))

    // Non-skewed data can be joined directly.
    val rddG = rdd1_B.join(rdd2_B)

    // Big/skewed side: prepend a random salt so records with the same key
    // spread across numSalts partitions.
    val rddC = rdd1_A.map(one => {
      val salt: Int = Random.nextInt(numSalts)
      (salt + "_" + one._1, one._2)
    })
    // Small side: replicate each record once per salt value so every salted
    // key on the big side finds its match.
    val rddD = rdd2_A.flatMap(one =>
      (0 until numSalts).map(salt => (salt + "_" + one._1, one._2))
    )

    // Join the salted RDDs, then strip the "salt_" prefix to restore the key.
    val rddF = rddC.join(rddD).map(one => {
      val saltedKey = one._1
      val value: (Int, Int) = one._2
      (saltedKey.split("_")(1), value)
    })

    rddG.union(rddF).foreach(println)

    // Release driver/executor resources.
    sc.stop()
  }
}
