package cn.doitedu.spark

import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

/**
 * Demonstrates how Spark assigns (or does not assign) a `Partitioner` to RDDs:
 *
 *  - `parallelize` produces an RDD with NO partitioner (`None`), sized by
 *    `spark.default.parallelism` unless a slice count is given explicitly.
 *  - `repartition` shuffles but still yields NO partitioner (it uses a random
 *    round-robin distribution, not a key-based one).
 *  - `groupBy` / `groupByKey` shuffle by key and attach a `HashPartitioner`.
 *  - `union` of two RDDs drops any partitioner and sums the partition counts.
 *  - `join` of two co-partitioned RDDs can reuse the existing partitioning.
 *
 * Run locally; output goes to stdout.
 */
object SparkPartitioner {
  def main(args: Array[String]): Unit = {

    // Keep Spark's internal INFO logging out of the demo output.
    Logger.getLogger("org").setLevel(Level.WARN)

    val spark: SparkSession = SparkSession.builder()
      .appName("SparkPartitioner") // was "": an empty app name shows up blank in the Spark UI
      .master("local[*]")
      .config("spark.default.parallelism", "20") // rdd1 will have 20 partitions
      .getOrCreate()

    val sc: SparkContext = spark.sparkContext

    // rdd1: partition count comes from spark.default.parallelism (20).
    val rdd1: RDD[Int] = sc.parallelize(1 to 10000)
    // rdd2: partition count is given explicitly (100).
    val rdd2: RDD[Int] = sc.parallelize(1 to 10000, 100)

    // parallelize => partitioner is None, 20 partitions.
    println(rdd1.partitioner)
    println(rdd1.partitions.size)

    // repartition shuffles to 10 partitions but assigns NO partitioner:
    // the data is redistributed round-robin, not by key.
    val rdd1Repartitioned: RDD[Int] = rdd1.repartition(10)
    println(rdd1Repartitioned.partitioner)
    println(rdd1Repartitioned.partitions.size)

    // groupBy shuffles by key => Some(HashPartitioner(defaultParallelism)).
    val rdd1Grouped: RDD[(Int, Iterable[Int])] = rdd1.groupBy(i => i)
    val rdd2Grouped: RDD[(Int, Iterable[Int])] = rdd2.groupBy(i => i)

    // union keeps all partitions of both inputs (20 + 100 = 120) and
    // drops the partitioner, since the two inputs need not agree on one.
    val rddUnion = rdd1.union(rdd2)
    val rddUnionGrouped: RDD[(Int, Iterable[Int])] = rddUnion.groupBy(i => i)

    // Both sides of the join are already hash-partitioned with the same
    // number of partitions, so the join can reuse that partitioning
    // instead of introducing an extra shuffle.
    val rddJoined = rdd1Grouped.join(rdd2Grouped)
    println(rddJoined.partitioner)
    println(rddJoined.partitions.size)

    println(rdd1Grouped.partitioner)
    println(rdd1Grouped.partitions.size)

    println(rdd2Grouped.partitioner)
    println(rdd2Grouped.partitions.size)

    println(rddUnion.partitioner)
    println(rddUnion.partitions.size)

    println(rddUnionGrouped.partitioner)
    println(rddUnionGrouped.partitions.size)

    spark.close()
  }
}
