// NOTE(review): stray spark-shell/REPL line — `sc` is not defined at file scope, and the
// same RDD is rebuilt inside TestPartitioner.main below. As part of a compiled source
// file this line will not compile; presumably leftover from an interactive session —
// confirm and remove.
val data = sc.parallelize(Seq("apple", "banana", "orange", "pear", "watermelon", "grape", "pineapple"))

import org.apache.spark.{Partitioner, SparkContext, SparkConf}
//自定义分区类，需要继承org.apache.spark.Partitioner类
/**
 * Custom partitioner: keys whose string form is at most 5 characters long go to
 * partition 0, all longer keys go to partition 1.
 *
 * A custom partitioner must extend org.apache.spark.Partitioner and implement
 * `numPartitions` and `getPartition`.
 *
 * @param numParts total number of partitions; should be >= 2, since
 *                 `getPartition` can return partition index 1
 */
class MyPartitioner(numParts: Int) extends Partitioner {
  // Number of partitions exposed to Spark.
  override def numPartitions: Int = numParts

  // Map a key to its partition index.
  override def getPartition(key: Any): Int = {
    // BUG FIX: `key` is typed Any, which has no `length` member — the original
    // `key.length` did not compile. Convert to String first.
    if (key.toString.length <= 5) 0 else 1
  }

  // Override equals/hashCode so Spark can recognize two instances with the same
  // partition count as equivalent and skip unnecessary shuffles.
  override def equals(other: Any): Boolean = other match {
    case p: MyPartitioner => p.numPartitions == numPartitions
    case _                => false
  }

  override def hashCode: Int = numPartitions
}
/**
 * Demo driver: builds an RDD of fruit names, repartitions it into 2 partitions
 * with MyPartitioner (short keys -> partition 0, long keys -> partition 1), and
 * prints the element count of each partition.
 */
object TestPartitioner {
  // Use explicit `: Unit =` — procedure syntax (`def main(...) { }`) is deprecated.
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    val sc = new SparkContext(conf)
    try {
      // Sample data: 7 fruit names of varying lengths.
      val data = sc.parallelize(Seq("apple", "banana", "orange", "pear", "watermelon", "grape", "pineapple"))
      // Pair each name with 1 and repartition by key length into 2 partitions.
      val rdd = data.map((_, 1)).partitionBy(new MyPartitioner(2))
      // One count per partition: the number of elements in that partition.
      val resultRDD = rdd.mapPartitions(iter => Iterator(iter.size))
      // Collect and print the per-partition counts.
      resultRDD.collect().foreach(println)
    } finally {
      // Always release the SparkContext, even if the job above fails.
      sc.stop()
    }
  }
}



