package cn.wangjie.spark.operations.partitioner

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext, TaskContext}

/**
 * 自定义分区器Partitioner，重新将RDD各个分区数据分到不同分区中
 */
/**
 * Demonstrates a custom [[org.apache.spark.Partitioner]]: redistributes the
 * key/value pairs of an RDD across partitions via `partitionBy`, printing the
 * contents of each partition before and after repartitioning.
 */
object SparkPartitionerTest {

	/** Prints each partition of `rdd` on its own line, tagged with the partition id. */
	private def dumpByPartition(rdd: RDD[(String, Int)]): Unit =
		rdd.foreachPartition { part =>
			println(s"p-${TaskContext.getPartitionId()}: ${part.mkString(", ")}")
		}

	def main(args: Array[String]): Unit = {
		// Build the SparkContext: local mode with 2 threads, app name derived
		// from the object's class name (trailing '$' of the Scala object stripped).
		val conf = new SparkConf()
			.setAppName(this.getClass.getSimpleName.stripSuffix("$"))
			.setMaster("local[2]")
		val sc: SparkContext = SparkContext.getOrCreate(conf)

		// Sample key/value data to partition.
		val wordCounts: RDD[(String, Int)] = sc.parallelize(
			Seq(
				"Hadoop" -> 11, "Spark" -> 34, "spark" -> 30, "hive" -> 12, "$ddd" -> 1111
			)
		)

		// Show the default partition layout.
		dumpByPartition(wordCounts)

		println("=================================================")

		// Repartition with the custom partitioner (defined elsewhere in this
		// package) and show the new layout.
		val repartitioned: RDD[(String, Int)] =
			wordCounts.partitionBy(new UpperLowerCasePartitioner())
		dumpByPartition(repartitioned)

		// Release cluster resources before the application exits.
		sc.stop()
	}

}
