package com.weic.spark.scala.p3.sotrt

import org.apache.spark.{Partitioner, SparkConf, SparkContext}

import scala.collection.mutable

/**
 * @Author: BigData-weic
 * @ClassName: CustomerPartitionOps
 * @Date: 2020/12/8 20:56
 * @Description: Demonstrates a custom Spark partitioner that routes (subject, info)
 *               records into one partition per distinct subject key.
 * @Version: 1.0
 */
object CustomerPartitionOps {
	/**
	 * Entry point: builds a small in-memory RDD of "subject name score" lines,
	 * keys each record by its subject, repartitions with [[MyPartition]] so each
	 * subject lands in its own partition, and prints every record.
	 */
	def main(args: Array[String]): Unit = {
		val conf = new SparkConf()
			.setAppName("CustomerPartitionOps")
			.setMaster("local[*]")
		val sc = new SparkContext(conf)

		val rawLines = List(
			"chinese ls 91",
			"english ww 56",
			"chinese zs 90",
			"math zl 76",
			"english zq 88",
			"chinese wb 95",
			"chinese sj 74",
			"english ts 87",
			"math ys 67",
			"english mz 77",
			"chinese yj 98",
			"english gk 96",
			"math zq 88",
			"chinese wb 95",
			"math sj 74",
			"english ts 87",
			"math ys 67",
			"english mz 77",
			"math yj 98",
			"english gk 96"
		)
		val dataRDD = sc.parallelize(rawLines)

		// Turn each line into a (subject, rest-of-line) pair by splitting on the
		// first space only — names/scores stay together in the value.
		val scoreRDD = dataRDD.map { line =>
			val Array(subject, info) = line.split(" ", 2)
			(subject, info)
		}

		// Collect the distinct subject keys; they drive the partition count.
		val keys = scoreRDD.keys.distinct().collect()

		// Repartition so every subject gets its own partition, then print.
		scoreRDD.partitionBy(new MyPartition(keys))
			.foreach { case (subject, info) =>
				println(subject + "---->" + info)
			}

//			.saveAsTextFile("file:\\F:\\datas\\spark\\customPartition1")

		// Release cluster resources.
		sc.stop()
	}

	/**
	 * Custom partitioner: assigns one partition per distinct key.
	 *
	 * @param keys the distinct keys observed in the RDD; each key maps to the
	 *             partition whose index is the key's position in this array.
	 */
	class MyPartition(keys: Array[String]) extends Partitioner {
		// Immutable key -> partition-index lookup, built once at construction.
		// (Replaces an imperative mutable.Map + index loop with the idiomatic form.)
		private val key2PartitionId: Map[String, Int] = keys.zipWithIndex.toMap

		/** Number of partitions equals the number of distinct keys. */
		override def numPartitions: Int = keys.length

		/**
		 * Resolves a record key to its partition index.
		 * Null or unknown keys fall back to partition 0 (the original code NPE'd
		 * on null keys via key.toString; Spark may deliver null keys).
		 */
		override def getPartition(key: Any): Int =
			if (key == null) 0 else key2PartitionId.getOrElse(key.toString, 0)
	}
}
