package cn.aijson.demo.rdd

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object RDDPartiton {
  def main(args: Array[String]): Unit = {
    // 0. env: create a local-mode Spark environment using all available cores.
    val conf: SparkConf = new SparkConf().setAppName("spark").setMaster("local[*]")
    val sc: SparkContext = new SparkContext(conf)
    sc.setLogLevel("WARN")

    try {
      // Create an RDD from 1..10; the default parallelism decides the partition count.
      val rdd1: RDD[Int] = sc.parallelize(1 to 10)
      // repartition always shuffles and may either grow or shrink the partition count.
      val rdd2: RDD[Int] = rdd1.repartition(2)
      val rdd3: RDD[Int] = rdd1.repartition(4)
      // coalesce (shuffle = false by default) can only reduce the partition count;
      // asking for more partitions than currently exist is a no-op.
      val rdd4: RDD[Int] = rdd1.coalesce(3)
      val rdd5: RDD[Int] = rdd4.coalesce(5) // stays at rdd4's partition count

      getPartitionContext(rdd1)
      println("##################################################")
      getPartitionContext(rdd2)
      println("##################################################")
      getFirstPartitionContext(rdd3)
      getPartitionContext(rdd3)
      println("##################################################")
      getPartitionContext(rdd4)
      println("##################################################")
      getPartitionContext(rdd5)

      // Word count, demonstrating per-partition processing with mapPartitions.
      val lines: RDD[String] = sc.textFile("data/input/words.txt")
      val result: RDD[(String, Int)] = lines.flatMap(_.split(" "))
        .mapPartitions { iter =>
          // Runs once per partition (on the executor), not once per record.
          println("启动分区迭代器----")
          iter.map((_, 1))
        }
        .reduceByKey(_ + _)

      result.foreachPartition(iter => iter.foreach(println))
    } finally {
      // FIX: the original never stopped the SparkContext, leaking the local cluster.
      sc.stop()
    }
  }

  /** Prints the record count and contents of partition 0 of `rdd`. */
  def getFirstPartitionContext(rdd: RDD[Int]): Unit = {
    // Keep only the records of partition index 0; every other partition yields nothing.
    val firstPartitionRDD = rdd.mapPartitionsWithIndex { (idx, iter) =>
      if (idx == 0) iter else Iterator.empty
    }

    println("第一个分区数据量:", firstPartitionRDD.count())
    println("第一个分区内内容:")
    // FIX: the original wrapped this whole expression in println(...), which also
    // printed the Unit result "()" after the partition's elements.
    firstPartitionRDD.collect.foreach(println)
  }

  /** Prints the partition count of `rdd` and, for each partition, its index and data. */
  def getPartitionContext(rdd: RDD[Int]): Unit = {
    println("rdd分区数量:", rdd.partitions.size)

    // Inspect each partition's contents; collect() forces the job to actually run.
    rdd.mapPartitionsWithIndex { (partid, iter) =>
      // FIX: the original appended to an immutable List with :+= inside a while
      // loop (O(n^2)) through a needless mutable Map; draining the iterator once
      // with toList prints and returns the same values.
      val partName = "part_" + partid
      val data = iter.toList
      println("rdd分区索引:", partid, "rdd分区数据:", data)
      Iterator.single(partName -> data)
    }.collect
  }
}
