package cn.whuc.spark

import org.apache.spark.rdd.RDD
import org.apache.spark.{Partitioner, SparkConf, SparkContext}

/**
 * Demonstrates routing pair-RDD records to specific partitions with a
 * custom [[Partitioner]] (see [[CategoryPartitioner]]) and writing one
 * output file per partition.
 */
object Demo_Partitioner {
  def main(args: Array[String]): Unit = {
    // 1. Create the SparkContext.
    // Fix: the app name was a blank string (" "), which makes the job
    // unidentifiable in the Spark UI / history server.
    val sc: SparkContext = new SparkContext(
      new SparkConf()
        .setMaster("local[*]")
        .setAppName("Demo_Partitioner")
    )

    // 2. Build a small (category, value) dataset spread over 3 default partitions.
    val datas: RDD[(String, String)] = sc.makeRDD(
      List(("军事", "aaa"), ("民生", "ccc"), ("体育", "ddd"), ("军事", "bbb")),
      3
    )

    // Uncomment to inspect the default (hash) partition layout:
    // datas.saveAsTextFile("part01")

    // Repartition by category using the custom partitioner, so each
    // category lands in its own output file.
    // NOTE(review): saveAsTextFile throws if the "part02" directory
    // already exists — delete it between runs.
    val newRDD: RDD[(String, String)] = datas.partitionBy(new CategoryPartitioner())

    newRDD.saveAsTextFile("part02")

    // 3. Shut down the context.
    sc.stop()
  }

}
/**
 * Routes records to one of three partitions by their category key:
 * "军事" (military) -> 0, "民生" (livelihood) -> 1, everything else -> 2.
 */
class CategoryPartitioner extends Partitioner {
  override def numPartitions: Int = 3

  /**
   * @param key the record's key (Spark passes it as `Any`)
   * @return the target partition index in [0, numPartitions)
   */
  override def getPartition(key: Any): Int =
    // Pattern match instead of an if/else-if chain with `return`:
    // `return` in Scala is an anti-pattern, and matching makes the
    // three-way dispatch explicit and exhaustive.
    key match {
      case "军事" => 0
      case "民生" => 1
      case _      => 2
    }
}