package com.zhaosc.spark.core

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.Partitioner

object RepartitionAndSortWithinPartitionsOperator {

  /**
   * Demonstrates `repartitionAndSortWithinPartitions`: repartitions an RDD
   * with a custom [[org.apache.spark.Partitioner]] and, in the same shuffle,
   * sorts records by key inside each resulting partition.
   *
   * Runs a local Spark job that prints every record after the shuffle so the
   * per-partition key ordering can be observed on stdout.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setMaster("local")
      .setAppName("SkewedJoin")

    val sc = new SparkContext(conf)
    try {
      val nameList = List(
        (1, "xuruyun"),
        (2, "liangyongqi"),
        (5, "liangyongqi"),
        (6, "liangyongqi"),
        (4, "liangyongqi"),
        (3, "wangfei"))

      val nameRdd = sc.parallelize(nameList)

      // Repartition and sort within each partition in a single shuffle.
      val repartitionRdd = nameRdd.repartitionAndSortWithinPartitions(new Partitioner {
        // Spark declares the abstract member paren-less: `def numPartitions: Int`.
        override def numPartitions: Int = 2

        // Non-negative mod: a plain `hashCode % numPartitions` returns a
        // negative — hence invalid — partition id for keys whose hashCode
        // is negative. This mirrors Spark's own HashPartitioner behavior.
        override def getPartition(key: Any): Int = {
          val rawMod = key.hashCode % numPartitions
          rawMod + (if (rawMod < 0) numPartitions else 0)
        }
      })

      // Print each record while preserving the per-partition sort order.
      // The original built a List by prepending and returned list.iterator,
      // which REVERSED the freshly sorted order; mapping the iterator keeps it.
      // preservesPartitioning = true tells Spark the partitioner still holds
      // (keys are untouched).
      repartitionRdd.mapPartitions(iter => {
        iter.map { record =>
          System.out.println("Content:" + record)
          record
        }
      }, true).collect()
    } finally {
      // Always release local Spark resources, even if the job fails.
      sc.stop()
    }
  }
}