package spark.partition

import org.apache.spark.{HashPartitioner, SparkContext, SparkConf}

/**
  * Created by root on 16-1-5.
  */
object SparkPartitionTest {

  /**
    * Demonstrates how an RDD's partitioner affects `cogroup`.
    *
    * Three variants are compared:
    *  1. `rdd1.sortByKey()` (range-partitioned) cogrouped with an unpartitioned `rdd2`;
    *  2. `rdd1` re-partitioned with a `HashPartitioner(2)` before the cogroup;
    *  3. same as (2) but cached, so the second action reuses the shuffled data
    *     instead of recomputing the partitioning.
    * Each result is printed twice on purpose: the second `foreach` shows whether
    * the lineage is recomputed or served from cache.
    */
  def test1(): Unit = {
    val conf = new SparkConf().setAppName("spark-partition-test").setMaster("local")
    val sc = new SparkContext(conf)
    try {
      val rdd1 = sc.parallelize(Array((1, 1), (1, 2), (2, 1), (3, 1), (1, 2), (2, 1), (3, 1)), 2).sortByKey()
      val rdd2 = sc.parallelize(Array((1, 'x'), (2, 'y'), (2, 'z'), (4, 'w'), (2, 'y'), (2, 'z'), (4, 'w')), 2)

      // 1) cogroup with the range-partitioned rdd1.
      val cogroupRDD = rdd1.cogroup(rdd2)
      println("------------1------------")
      cogroupRDD.foreach(println)
      cogroupRDD.foreach(println) // second action: lineage is recomputed (no cache)

      // 2) hash-partition rdd1 first, then cogroup.
      val rdd3 = rdd1.partitionBy(new HashPartitioner(2))
      val cogroupRDD2 = rdd3.cogroup(rdd2)
      println("------------2------------")
      cogroupRDD2.foreach(println)
      cogroupRDD2.foreach(println)

      // 3) hash-partition AND cache: the second action reuses the cached partitions.
      val rdd4 = rdd1.partitionBy(new HashPartitioner(2)).cache()
      println("------------3------------")
      val cogroupRDD3 = rdd4.cogroup(rdd2)
      cogroupRDD3.foreach(println)
      cogroupRDD3.foreach(println)
    } finally {
      // Always release the context: only one active SparkContext is allowed per JVM.
      sc.stop()
    }
  }

  /**
    * Demonstrates when a pre-partitioned (and persisted) RDD avoids a shuffle
    * during `join`.
    *
    * `rdd1` is hash-partitioned into 2 partitions and persisted, so:
    *  - a plain `join` (inherits rdd1's partitioner) needs no shuffle of rdd1;
    *  - a `join` with a `HashPartitioner(2)` (same partitioner) needs no shuffle of rdd1;
    *  - a `join` with a `HashPartitioner(3)` (different partitioner) forces rdd1
    *    to be shuffled again.
    * `count()` is called on each result because `join` is lazy — without an
    * action no job (and no shuffle) would ever actually run.
    */
  def test2(): Unit = {
    val conf = new SparkConf().setAppName("spark-partition-test").setMaster("local")
    val sc = new SparkContext(conf)
    try {
      val rdd1 = sc.parallelize(Array((1, 1), (1, 2), (2, 1), (3, 1), (1, 2), (2, 1), (3, 1)))
        .partitionBy(new HashPartitioner(2)).persist()
      val rdd2 = sc.parallelize(Array((1, 'x'), (2, 'y'), (2, 'z'), (4, 'w'), (2, 'y'), (2, 'z'), (4, 'w')))

      // Optimized: rdd1 keeps its partitioner, no re-shuffle of rdd1.
      rdd1.join(rdd2).count()
      // Optimized: explicit partitioner matches rdd1's, still no re-shuffle.
      rdd1.join(rdd2, new HashPartitioner(2)).count()
      // Not optimized: a different partitioner (3 partitions) forces rdd1 to shuffle again.
      rdd1.join(rdd2, new HashPartitioner(3)).count()
    } finally {
      // Always release the context: only one active SparkContext is allowed per JVM.
      sc.stop()
    }
  }

  def main(args: Array[String]): Unit = {
    test1()
  }
}
