package action.RDD创建操作

import org.apache.spark.{HashPartitioner, SparkConf, SparkContext}

/**
  *
  * @author wdmcode@aliyun.com
  * @version 1.0.0
  * @date 2018/11/8
  */
object SparkTestDemo {

  /**
    * Demo: join a hash-partitioned pair RDD with the union of two other pair RDDs,
    * then print the result and its lineage (`toDebugString`).
    *
    * @param args unused command-line arguments
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setAppName("SparkParallelize")
      .setMaster("local[2]")

    val spark = new SparkContext(conf)
    // Ensure the context is stopped even if an RDD action throws,
    // so the local Spark runtime and its UI port are always released.
    try {
      // (key, value) pairs; note the repeated keys 1, 2 and 3 —
      // the join below will produce one output row per matching pair.
      val data1 = Array[(Int, Char)](
        (1, 'a'), (2, 'b'),
        (3, 'c'), (4, 'd'),
        (5, 'e'), (3, 'f'),
        (2, 'g'), (1, 'h'))

      val rangePairs1 = spark.parallelize(data1, 3)
      // Repartition by key hash so equal keys are co-located; the later
      // join can then reuse this partitioning for the left side.
      val hashPairs1 = rangePairs1.partitionBy(new HashPartitioner(3))

      val data2 = Array[(Int, String)](
        (1, "A"), (2, "B"),
        (3, "C"), (4, "D"))

      val pairs2 = spark.parallelize(data2, 2)
      // Narrow String values to Char so the union with data3 type-checks.
      val rangePairs2 = pairs2.map(x => (x._1, x._2.charAt(0)))

      val data3 = Array[(Int, Char)]((1, 'X'), (2, 'Y'))
      val rangePairs3 = spark.parallelize(data3, 2)

      // Union keeps all partitions of both inputs (2 + 2 = 4 partitions).
      val rangePairs = rangePairs2.union(rangePairs3)

      // Inner join on key: each element is (key, (valueFromHashPairs1, valueFromRangePairs)).
      val result = hashPairs1.join(rangePairs)

      result.foreach(i => println("[result " + i + "] "))

      // Show the RDD lineage, including the shuffle introduced by the join.
      println(result.toDebugString)
    } finally {
      spark.stop()
    }
  }
}
