package com.madhukaraphatak.examples.sparktwo

import org.apache.spark.{SparkConf, SparkContext}

/**
  * Created by zhang_tao6 on 2017-08-14.
  */
object WordCount {

  /**
    * Entry point: starts a local SparkContext, builds a 2-partition RDD of
    * 1..6, and prints one "partitionIndex|partitionSum" line per partition
    * (i.e. "0|6" and "1|15"), then stops the context.
    */
  def main(args: Array[String]): Unit = {
    // Windows-only workaround: point Hadoop at a local install so winutils.exe
    // can be found. NOTE(review): hard-coded path — confirm it matches the
    // machine this runs on; harmless on non-Windows hosts.
    System.setProperty("hadoop.home.dir", "D:\\hadoop-2.7.3")

    val conf = new SparkConf()
    conf.setMaster("local[1]").setAppName("wordcount")
    val sc = new SparkContext(conf)

    // rdd1 has two partitions: (1, 2, 3) and (4, 5, 6).
    val rdd1 = sc.makeRDD(1 to 6, 2)

    // Emit a single "index|sum" string per partition.
    // The original accumulated with a `var` in a while loop and prepended a
    // String onto a List[Int] (silently widening it to List[Any]); summing the
    // partition iterator directly is both type-safe and idiomatic.
    val rdd2 = rdd1.mapPartitionsWithIndex { (index, values) =>
      Iterator(index + "|" + values.sum)
    }
    rdd2.foreach(println)

    sc.stop()
  }
}