package com.alison.scala

import org.apache.spark.{Partitioner, SparkConf, SparkContext}

/**
 * 单词频率统计 aA-nN在0分区,其他字母开头的在1分区,其他在2分区
 */
/**
 * Word-frequency count routed through a custom partitioner:
 * words starting with a-n/A-N go to partition 0, words starting with any
 * other letter go to partition 1, and everything else (digits, punctuation,
 * URLs' leading characters, ...) goes to partition 2.
 */
object RddPartitionTest {

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("wc").setMaster("local")
    val sc = new SparkContext(conf)
    val line = sc.parallelize(Array(
      "20161123101523 http://java.learn.com/java/javaee.shtml",
      "20161123101523 http://java.learn.com/java/javaee.shtml",
      "20161123101523 http://ui.learn.com/ui/video.shtml",
      "20161123101523  http://bigdata.learn.com/bigdata/teacher.shtml",
      "20161123101523 http://android.learn.com/android/video.shtml",
      "20161123101523 http://h5.learn.com/h5/teacher.shtml",
      "20161123101523  http://h5.learn.com/h5/course.shtml",
      "20161123101523 http://bigdata.learn.com/bigdata/teacher.shtml"),
      2)
    // Split on runs of whitespace so the double-spaced records don't
    // produce empty-string tokens.
    val words = line.flatMap(s => s.split("[\\s]+"))
    val wordCounts = words.map((_, 1)).reduceByKey(_ + _)
    val mypartitioner = new WorkCountPartition(3)
    // NOTE: partitionBy triggers another shuffle, so the global ordering
    // produced by sortBy is NOT preserved inside the new partitions; the
    // sort here only affects the order data enters the shuffle.
    val resultrdd = wordCounts.sortBy(_._2, ascending = false).partitionBy(mypartitioner)
    resultrdd.collect().foreach(println)
    // Release the SparkContext so the local driver shuts down cleanly.
    sc.stop()
  }

  /**
   * Custom partitioner keyed on the first character of the key's string form:
   * [a-nA-N] -> partition 0, [o-zO-Z] -> partition 1, anything else -> 2.
   *
   * @param numPartition total number of partitions (must be >= 3 for the
   *                     routing above to be meaningful)
   */
  class WorkCountPartition(numPartition: Int) extends Partitioner {
    // Number of partitions, supplied via the constructor.
    override def numPartitions: Int = numPartition

    /** Map a key to its partition id based on its first character. */
    override def getPartition(key: Any): Int = {
      // take(1) yields "" for an empty key instead of throwing, unlike
      // substring(0, 1); an empty string falls through to partition 2.
      val first = key.toString.take(1)
      // Bug fix: the original patterns "[A-Na-n]]" (stray ']' requires two
      // characters) and "o-zO-Z" (missing brackets, matches only the literal
      // 7-char string) could never match a single character, so every key
      // landed in partition 2.
      if (first.matches("[a-nA-N]")) {
        0
      } else if (first.matches("[o-zO-Z]")) {
        1
      } else {
        2
      }
    }
  }
}
