package com.shujia.spark.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demonstrates the three element-transforming RDD operators:
 *   - map:                   one input row in, one output row out
 *   - mapPartitions:         one whole partition in (as an Iterator), one partition out
 *   - mapPartitionsWithIndex: like mapPartitions, but also receives the partition index
 *
 * All three are lazy transformations; the `foreach(println)` calls are the
 * actions that actually trigger the jobs.
 */
object Demo3Map {
  def main(args: Array[String]): Unit = {

    // Run locally with a single core; app name shows up in the Spark UI.
    val conf = new SparkConf()
    conf.setMaster("local")
    conf.setAppName("map")

    val sc = new SparkContext(conf)

    try {
      /**
       * map: build an RDD from an in-memory collection, then transform it
       * element by element (odd values get +1, even values get doubled —
       * arbitrary demo logic).
       */
      val listRDD: RDD[Int] = sc.parallelize(List(1, 2, 3, 4, 5, 6, 7, 8, 9))

      val mapRDD: RDD[Int] = listRDD.map((i: Int) => {
        if (i % 2 == 1) {
          i + 1
        } else {
          i * 2
        }
      })

      mapRDD.foreach(println)


      /**
       * mapPartitions: the function receives an Iterator over one entire
       * partition and must return an Iterator. Useful when per-partition
       * setup (e.g. a DB connection) would be too expensive to do per row.
       */
      val linesRDD: RDD[String] = sc.textFile("data/words")

      // The iterator holds all lines of a single partition.
      val wordsRDD: RDD[String] = linesRDD.mapPartitions((iter: Iterator[String]) => {
        // Process the whole partition with iterator combinators (stays lazy,
        // so the partition is never materialized in memory at once).
        val iterator: Iterator[String] = iter
          .flatMap((line: String) => line.split(","))
        // Return an iterator for the output partition.
        iterator
      })

      wordsRDD.foreach(println)

      /**
       * mapPartitionsWithIndex: same as mapPartitions, but the function also
       * gets the partition's index — handy for debugging data distribution.
       * Note: the println below runs on the executors, not the driver.
       */
      val indexRDD: RDD[String] = linesRDD.mapPartitionsWithIndex((index: Int, iter: Iterator[String]) => {
        println(s"当前分区编号：$index")
        val iterator: Iterator[String] = iter
          .flatMap((line: String) => line.split(","))
        iterator
      })

      indexRDD.foreach(println)

    } finally {
      // Fix: the original never stopped the context, leaking the driver's
      // resources (UI port, scheduler threads). Always stop it on exit.
      sc.stop()
    }
  }

}
