package com.zhaosc.spark.core.skewed
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import scala.util.Random
import org.apache.spark.rdd.RDD.rddToOrderedRDDFunctions
import org.apache.spark.rdd.RDD.rddToPairRDDFunctions

/**
 * Demo of the "sample the skewed key and split the join" technique:
 *
 *  1. Sample the left RDD to find the single most frequent (skewed) key.
 *  2. Split both sides into a "skewed" part (that key only) and a "common" part.
 *  3. Salt the skewed left records with a random prefix in [0, SaltCount),
 *     expand the skewed right records SaltCount times (one copy per prefix),
 *     and join the salted parts — spreading the hot key over SaltCount tasks.
 *  4. Strip the salt, join the common parts normally, and union the results.
 */
object SkewedJoin {

  /** Number of salt prefixes for the skewed key; also the expansion factor
    * for the right side and the parallelism of the salted join. */
  private val SaltCount = 2

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setMaster("local")
      .setAppName("SkewedJoin")

    val sc = new SparkContext(conf)

    // Left side: the repeated "bjsxt" key simulates data skew.
    val nameList = List(("bjsxt", 1), ("bjsxt", 1), ("bjsxt", 1), ("shsxt", 1), ("gzsxt", 1))
    val scoreList = List(("bjsxt", 100), ("shsxt", 95), ("gzsxt", 90))

    val nameRdd = sc.parallelize(nameList)
    val scoreRdd = sc.parallelize(scoreList)

    // nameRdd contains a skew-causing key but we don't know which one, so sample it.
    // withReplacement = false: each record is drawn at most once; fraction = 0.8.
    val sample = nameRdd.sample(withReplacement = false, fraction = 0.8)

    // Most frequent key in the sample is taken to be the skewed key.
    // top(1) with an Ordering on the count replaces sortByKey(false).take(1)(0):
    // no full sort, and no bare (0) index that would throw an opaque
    // ArrayIndexOutOfBoundsException when the sample comes back empty.
    val skewedKey = sample
      .reduceByKey(_ + _)
      .top(1)(Ordering.by[(String, Int), Int](_._2)) match {
        case Array((key, _)) => key
        case _               => sys.error("empty sample: cannot determine the skewed key")
      }

    val bcName = sc.broadcast(skewedKey)

    /**
     * Split nameRdd and scoreRdd on the skewed key.
     */
    val skewedNameRDD = nameRdd.filter(_._1 == bcName.value)
    val commonName = nameRdd.subtract(skewedNameRDD)

    val skewedScoreRDD = scoreRdd.filter(_._1 == bcName.value)
    val commonScore = scoreRdd.subtract(skewedScoreRDD)

    /**
     * Salt skewedNameRDD with a random prefix in [0, SaltCount);
     * expand skewedScoreRDD SaltCount times, one copy per prefix.
     *
     * Note: the Random *companion object* is called inside the task. A driver-side
     * `new Random()` captured in the closure would be serialized with its internal
     * state, so every task's copy would emit the identical prefix sequence,
     * defeating the point of salting.
     */
    val prefixSkewedNameRDD = skewedNameRDD.map { case (key, value) =>
      (Random.nextInt(SaltCount) + "_" + key, value)
    }

    val expandSkewedScoreRDD = skewedScoreRDD.flatMap { case (key, value) => // e.g. (bjsxt,100)
      (0 until SaltCount).map(i => (i + "_" + key, value))
    }

    val prefixJoin1 = prefixSkewedNameRDD.join(expandSkewedScoreRDD, SaltCount)

    // Strip the salt. split("_", 2) removes only the prefix, so an original key
    // that itself contains an underscore survives intact.
    val join1 = prefixJoin1.map { case (saltedKey, value) =>
      (saltedKey.split("_", 2)(1), value)
    }

    val join2 = commonName.join(commonScore)

    join1.union(join2).collect().foreach(println)

    sc.stop()
  }
}