package com.inspur
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext


object transform {
  /** Demonstrates common RDD transformations — map, filter, flatMap, distinct,
    * union, intersection, subtract, cartesian, groupBy, coalesce — and two
    * word-count strategies (reduceByKey vs groupByKey) on a local SparkContext.
    *
    * @param args unused command-line arguments
    */
  def main(args: Array[String]): Unit = {
    // First: build the SparkConf and instantiate the SparkContext.
    val conf = new SparkConf()
    conf.setAppName("Test")
    conf.setMaster("local")
    val sc = new SparkContext(conf)
    try {
      // one. map: applies a function to every element, returning a new RDD
      println("one.map-------------------------------------------------")
      val arr = Array("hello", "jeck", "and", "halli")
      val rdd1 = sc.parallelize(arr)
      val rdd_map = rdd1.map(z => (z, 1))
      println(rdd1.collect().mkString(","))
      println(rdd_map.collect().mkString(","))

      println("two.filter-------------------------------------------------")
      // two. filter: keeps only the elements matching the predicate
      val rdd_filter = rdd1.filter(x => x.contains("l"))
      println(rdd_filter.collect().mkString(","))

      println("three.flatmap-------------------------------------------------")
      // three. flatMap: maps each element to a collection, then flattens the result
      val lst = List(List(1, 3, 5), List(2, 5, 7, 9))
      val rdd_lst = sc.parallelize(lst)
      val rdd_flat = rdd_lst.flatMap(x => x.map(_ * 2))
      println(rdd_flat.collect().mkString(","))

      println("four.distinct-------------------------------------------------")
      // four. distinct: removes duplicate elements (no arguments)
      val arra = Array(0, 3, 2, 7, 2, 3, 3)
      val rdd2 = sc.parallelize(arra)
      val rdd_distinct = rdd2.distinct()
      println(rdd_distinct.collect().mkString(","))

      println("five.union-------------------------------------------------")
      // five. union: new RDD containing every element of both RDDs
      val rdd_union = rdd_flat.union(rdd_distinct)
      println(rdd_flat.collect().mkString(","))
      println(rdd_distinct.collect().mkString(","))
      println(rdd_union.collect().mkString(","))

      // six. intersection: elements common to both RDDs
      println("six.intersection-------------------------------------------------")
      val arr3 = Array(2, 3, 4)
      val rdd3 = sc.parallelize(arr3, 3)
      val rdd_intersec = rdd2.intersection(rdd3)
      println(rdd_intersec.collect().mkString(","))

      // seven. subtract: removes from this RDD the elements that also appear in the argument RDD
      println("seven.subtract-------------------------------------------------")
      val rdd_subtract = rdd2.subtract(rdd3)
      println(rdd_subtract.collect().mkString(","))

      // eight. cartesian: Cartesian product of the two RDDs
      println("eight.cartesian-------------------------------------------------")
      val rdd_cartesian = rdd3.cartesian(rdd2)
      println(rdd_cartesian.collect().mkString(","))

      // nine. groupBy: groups elements by the value of the key function
      println("nine.group by-------------------------------------------------")
      val rdd_groupby = rdd1.groupBy(x => x.contains("l"))
      println(rdd_groupby.collect().mkString(","))

      // ten. coalesce: merges partitions down to the requested count
      println("ten.coalesce-------------------------------------------------")
      val arr4 = Array(2, 3, 4)
      val rdd4 = sc.parallelize(arr4)
      val rdd5 = rdd4.coalesce(2)
      // glom turns each partition into an array, making the partitioning visible
      rdd4.glom().collect().foreach(_.foreach(println))
      println("---------------------")
      rdd5.glom().collect().foreach(_.foreach(println))

      println("-------------------------------------------------")
      // Word count two ways, starting from (word, 1) pairs:
      val words = Array("one", "two", "two", "four")
      val wordPairsRdd = sc.parallelize(words).map(word => (word, 1))
      // ("one",1) ("two",1) ("two",1) ("four",1)
      // reduceByKey combines values per key locally before shuffling (preferred)
      val wordCountWithReduce = wordPairsRdd.reduceByKey(_ + _).collect()
      println(wordCountWithReduce.mkString(","))
      // groupByKey shuffles all values, then sums them per key (t._1 / t._2: tuple access)
      val wordCountWithGroup = wordPairsRdd.groupByKey().map(t => (t._1, t._2.sum)).collect()
      println(wordCountWithGroup.mkString(","))
    } finally {
      sc.stop() // release the SparkContext even if a job above fails
    }
  }
}