package cn.whuc.homework

import org.apache.spark.rdd.RDD
import org.apache.spark.{Partitioner, SparkConf, SparkContext}

object Case09 {
  /**
   * Reads tab-separated employee records from `input/employee.txt`,
   * keys each line by its department (3rd field), repartitions with a
   * custom [[EmployeePartitioner]] so each department lands in its own
   * partition, and writes the raw lines out under `emp/`.
   */
  def main(args: Array[String]): Unit = {
    // 1. Create the SparkContext (local mode, all available cores).
    val sc: SparkContext = new SparkContext(
      new SparkConf()
        .setMaster("local[*]")
        .setAppName("Case09") // was a blank placeholder (" "); give the job a real name
    )

    try {
      // 2. Key each line by its department column (3rd tab-separated field).
      //    NOTE(review): assumes every line has at least 3 fields — lines with
      //    fewer will throw ArrayIndexOutOfBoundsException at runtime.
      val lines: RDD[String] = sc.textFile("input/employee.txt")
      val byDept: RDD[(String, String)] = lines.map { line =>
        val fields: Array[String] = line.split("\t")
        (fields(2), line)
      }

      // Repartition by department so the output has one file per department.
      val partitioned: RDD[(String, String)] = byDept.partitionBy(new EmployeePartitioner())

      // Drop the key and persist the original lines.
      partitioned.map(_._2).saveAsTextFile("emp")
    } finally {
      // 3. Always release the context, even if the job above fails.
      sc.stop()
    }
  }

  /**
   * Routes records to one of four partitions by department key.
   * Unknown departments all fall through to partition 3.
   */
  class EmployeePartitioner extends Partitioner {
    override def numPartitions: Int = 4

    // Pattern match instead of an if/else chain of explicit `return`s:
    // `return` is non-idiomatic in Scala and hazardous inside lambdas.
    override def getPartition(key: Any): Int = key match {
      case "HR"          => 0
      case "Engineering" => 1
      case "Sales"       => 2
      case _             => 3
    }
  }

}