package com.sugon.ww

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

object Oper2 {

  /**
    * Demo of `mapPartitionsWithIndex`: doubles every element of a small RDD
    * and tags each result with the index of the partition it came from,
    * then prints the `(partitionIndex, doubledValue)` pairs.
    *
    * Runs locally (`local[*]`) and always stops the SparkContext on exit.
    */
  def main(args: Array[String]): Unit = {

    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("map")

    val sc = new SparkContext(conf)

    try {
      // Source data: the integers 1..10 split across 2 partitions.
      val listRDD: RDD[Int] = sc.makeRDD(1 to 10, 2)

      // Earlier experiment: per-partition doubling via mapPartitions.
      //    val mapRdd: RDD[Int] = listRDD.repartition(2).mapPartitions(datas => {
      //      datas.map(data => data * 2)
      //    })

      // Earlier experiment: sum each partition by draining its iterator.
      //    val value: RDD[Int] = listRDD.mapPartitions(datas => {
      //      val result = List[Int]()
      //      var cur = 0
      //      while (datas.hasNext) {
      //
      //        cur += datas.next()
      //      }
      //      result.::(cur).iterator
      //    })

      // Earlier experiment: reuse of the generic mapFunc helper below.
      //    val value: RDD[String] = listRDD.mapPartitions(x => {
      //
      //      mapFunc[Int, String](x, line => {
      //
      //        line.toString
      //
      //      })
      //    })

      // Pair each doubled element with the index of its partition.
      // (Renamed lambda params to avoid the inner value shadowing the iterator.)
      val pairsRDD: RDD[(Int, Int)] = listRDD.mapPartitionsWithIndex((partitionIndex, elems: Iterator[Int]) => {
        elems.map(elem => (partitionIndex, elem * 2))
      })

      // NOTE: foreach runs on the executors, so in a cluster the output would
      // appear in executor logs; with local[*] it prints to this console.
      pairsRDD.foreach(println)
    } finally {
      // Always release the SparkContext, even if the job above throws.
      sc.stop()
    }
  }

  /**
    * Reference sketches showing why mapPartitions is preferred for per-partition
    * setup/teardown (e.g. one database connection per partition):
    *
    * rdd.mapPartitions(x => {
    * println("open database connection")
    * val res = x.map(line=>{
    * print("write record: " + line)
    * line
    * })
    * res
    * })
    *
    * rdd1.mapPartitions(x => {
    * println("open database connection")
    * new Iterator[Any] {
    * override def hasNext: Boolean = {
    * if (x.hasNext) {
    * true
    * } else {
    * println("close database connection")
    * false
    * }
    * }
    * override def next(): Any = "write record: " + x.next()
    * }
    * })
    *
    */

  /**
    * Generic per-element transform over a partition iterator; reusable as the
    * body of any `mapPartitions` call.
    *
    * @param iterator the elements of one partition
    * @param f        transformation applied lazily to each element
    * @tparam T input element type
    * @tparam U output element type
    * @return a lazy iterator of the transformed elements
    */
  def mapFunc[T, U](iterator: Iterator[T], f: T => U): Iterator[U] =
    iterator.map(f)


}
