package org.zjt.spark

import org.apache.spark.{SparkConf, SparkContext}
import org.zjt.spark.SortWordCount.sc

/**
  * DESC: partitioning + persistence (partitionBy + persist).
  *
  *     PS: partitionBy is only defined on pair RDDs, i.e. collections of
  *     key/value objects such as a List of Tuple2 (or Map entries).
  *
  * @author
  * @create 2017-05-10 10:58 AM
  **/
object ParitionWC {

  /**
    * Entry point. Demonstrates repartitioning a pair RDD with
    * [[org.apache.spark.HashPartitioner]] and caching the result with `persist`.
    *
    * Note: `partitionBy` exists only on RDDs of key/value pairs (RDD[(K, V)]),
    * which is why every example below parallelizes Tuple2 values.
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("WordCount").setMaster("local")
    val sc = new SparkContext(conf)

    // Tuple literal syntax (k, v) is the idiomatic form of `new Tuple2(k, v)`.
    val var1 = sc.parallelize(List((1, "21"), (1, "2")))
    val var2 = var1.partitionBy(new org.apache.spark.HashPartitioner(1)).persist()

    val var3 = sc.parallelize(List((1, "21"), (1, "2")))
    val var4 = var3.partitionBy(new org.apache.spark.HashPartitioner(1)).persist()

    // Start with 2 partitions, then repartition down to 1 and cache.
    val rdd1 = sc.makeRDD(Array((1, "A"), (2, "B"), (3, "C"), (4, "D")), 2)
    val rdd2 = rdd1.partitionBy(new org.apache.spark.HashPartitioner(1)).persist()

    // Bug fix: the original printed `partitioner.size`, which is the size of the
    // Option[Partitioner] (always 1 once a partitioner is set) — not the number
    // of partitions. Print the actual partition count instead.
    println(rdd2.partitions.length + ":" + rdd2.partitions.mkString(","))
    println(var2.partitions.length + ":" + var2.partitions.mkString(","))

    sc.stop()
  }
}
