package com.haozhen.rdd

/**
  * @author haozhen
  * @email haozh@ync1.com
  * @date 2021/1/28  12:20
  */
/**
  * Demo entry point exploring RDD construction and partitioning.
  *
  * Earlier experiments (dependency inspection, mapPartitions, groupByKey
  * with a HashPartitioner) are kept below as commented-out code; the live
  * section demonstrates `SparkContext.makeRDD` and prints the resulting
  * partition count.
  */
object RddDemo {

  def main(args: Array[String]): Unit = {
    import org.apache.spark.rdd.RDD
    import org.apache.spark.{SparkConf, SparkContext}

    // stripSuffix("$") removes the trailing '$' that the compiler appends to a
    // Scala object's class name. This replaces `.init`, which blindly dropped
    // the last character and would corrupt the name if no '$' were present.
    val conf: SparkConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName(this.getClass.getCanonicalName.stripSuffix("$"))

    val context = new SparkContext(conf)

    try {
//    val rdd = context.textFile("data/spark/README.md")
//
//    val wordmap = rdd.flatMap(_.split(" ")).map((_,1))
//
//    println(wordmap)
//
//    wordmap.dependencies.foreach(dep=>{
//      println("dependency type:"+ dep.getClass)
//      println("dependency RDD:"+ dep.rdd)
//      println("dependency partitions:"+ dep.rdd.partitions)
//      println("dependency partitions size:"+ dep.rdd.partitions.size)
//    })
//
//    val wordruduce: RDD[(String, Int)] = wordmap.reduceByKey(_+_)
//
//    println(wordruduce)
//
//    wordruduce.dependencies.foreach(dep=>{
//      println("dependency type:"+ dep.getClass)
//      println("dependency RDD:"+ dep.rdd)
//      println("dependency partitions:"+ dep.rdd.partitions)
//      println("dependency partitions size:"+ dep.rdd.partitions.size)
//    })

//      val a: RDD[Int] = context.parallelize(1 to 9,3)
//
//      def iterfunc [T] (iter: Iterator[T]):Iterator[(T,T)]={
//        var res = List[(T,T)]()
//        var pre = iter.next
//        while (iter.hasNext){
//          val cur = iter.next
//          res ::= (pre,cur)
//          pre = cur
//        }
//        res.iterator
//      }
//      val result : Array[(Int, Int)] = a.mapPartitions(iterfunc).collect()
//      result.foreach(println)

//        val rdd = context.textFile("data/spark/README.md")
//
//        val wordGroupBy = rdd.flatMap(_.split(" ")).map((_,1)).groupByKey(new org.apache.spark.HashPartitioner(4))
//
//        println(wordGroupBy.partitioner)

      // NOTE(review): makeRDD has an overload taking Seq[(T, Seq[String])]
      // that treats the second tuple element as preferred host locations and
      // creates one partition per item. The explicit RDD[(Range.Inclusive,
      // Seq[String])] annotation is kept verbatim so overload resolution (and
      // thus the printed partition count) is unchanged — confirm which
      // overload is intended before refactoring this annotation away.
      val tuples: Seq[(Range.Inclusive, Seq[String])] = Seq((1 to 9,Seq("test1","test3")),(10 to 19,Seq("test2","test3")),(20 to 29,Seq("test1","test2")))
      val rdd: RDD[(Range.Inclusive, Seq[String])] = context.makeRDD(tuples)
      println(rdd.partitions.length)
    } finally {
      // Release the SparkContext even if the demo code above throws.
      context.stop()
    }
  }

}
