package com.inspur.spark;
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
// Transformations are lazy: they are only actually executed when an action (e.g. collect, foreach) is invoked.

object Transformationrdd {

  /**
   * Demonstrates common RDD transformations (map, filter, flatMap, distinct,
   * union, intersection, subtract, cartesian, groupBy, coalesce, reduceByKey,
   * groupByKey) on a local Spark cluster, printing each result to stdout.
   *
   * Transformations are lazy; nothing runs until an action such as
   * `collect()` or `foreach()` is called.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setAppName("Test")
    conf.setMaster("local")
    val sc = new SparkContext(conf)
    try {
      val arr = Array("tom", "jack", "mary", "jone")
      val rdd1 = sc.parallelize(arr)

      // 1. map: applies the function to every element, returning a new RDD.
      val rdd_map = rdd1.map(z => (z, 1))
      // collect is an action: it pulls the RDD's data back to the driver as a local array.
      println(rdd1.collect().mkString(","))
      println(rdd_map.collect().mkString(","))
      println("========================")

      // 2. filter: keeps only elements matching the predicate.
      val rdd_filter = rdd1.filter(x => x.contains("j"))
      println(rdd_filter.collect().mkString(" ")) // prints: jack jone
      println("========================")

      // 3. flatMap: maps each element to a collection and flattens the results.
      val lst = List(List(2, 3, 4, 5), List(6, 7, 8, 9))
      val rdd_lst = sc.parallelize(lst)
      val rdd_flatmap = rdd_lst.flatMap(x => x.map(_ * 2)) // doubles every value
      println(rdd_flatmap.collect().mkString("--"))

      // 4. distinct: removes duplicate elements.
      val arr1 = Array(3, 4, 5, 6, 7, 8, 9, 3, 4, 3, 5, 4, 5)
      val rdd_ar1 = sc.parallelize(arr1)
      val rdd_distinct = rdd_ar1.distinct()
      println("========================")
      println(rdd_distinct.collect().mkString(" "))
      println("========================")

      // 5. union: concatenates two RDDs (duplicates are kept).
      val rdd_union = rdd_flatmap.union(rdd_distinct)
      println(rdd_flatmap.collect().mkString(" "))
      println(rdd_distinct.collect().mkString(" "))
      println(rdd_union.collect().mkString(" "))

      // 6. intersection: elements present in both RDDs.
      val rdd_arr2 = sc.parallelize(Array(3, 4, 5))
      val rdd_intersection = rdd_ar1.intersection(rdd_arr2)
      println(rdd_intersection.collect().mkString("--"))
      println("=========================")

      // 7. subtract: elements of rdd_ar1 that do not appear in rdd_arr2.
      val rdd_subtract = rdd_ar1.subtract(rdd_arr2)
      println(rdd_subtract.collect().mkString("---"))

      // 8. cartesian: Cartesian product of the two RDDs.
      println("==========================")
      val rdd_cartesian = rdd_arr2.cartesian(rdd_ar1) // fixed typo: was rdd_cetesian
      println(rdd_ar1.collect().mkString("=="))
      println(rdd_arr2.collect().mkString("=="))
      println(rdd_cartesian.collect().mkString("--"))

      // 9. groupBy: one group per distinct key — here `true` (name contains "j")
      // and `false` (it does not).
      println("===========================")
      val rdd_groupby = rdd1.groupBy(x => x.contains("j"))
      println(rdd_groupby.collect().mkString("---"))

      // 10. coalesce: reduce the number of partitions (3 -> 2).
      println("===========================")
      val x = sc.parallelize(Array(1, 2, 3, 4, 5), 3)
      val y = x.coalesce(2)
      // NOTE(review): foreach runs on the executors; this only prints on the
      // driver's console because the master is "local".
      y.foreach(println)
      // glom turns each partition into an array, exposing the partitioning;
      // e.g. parallelize(1 to 4, 2).glom().collect() == Array(Array(1,2), Array(3,4)).
      x.glom().collect().foreach(_.foreach(println))
      println("=======================")
      y.glom().collect().foreach(_.foreach(println))

      // 11. Word count two ways. reduceByKey combines values per key on the map
      // side before shuffling and is preferred over groupByKey for aggregation.
      val words = Array("one", "two", "two", "three", "three", "three")
      val wordPairsRDD = sc.parallelize(words).map(word => (word, 1))
      val wordCountWithReduce = wordPairsRDD.reduceByKey(_ + _).collect()
      // t._1 is the word, t._2 is the Iterable of 1s for that word.
      val wordCountWithGroup = wordPairsRDD.groupByKey().map(t => (t._1, t._2.sum)).collect()
      // BUG FIX: these results were computed but never shown.
      println(wordCountWithReduce.mkString(" "))
      println(wordCountWithGroup.mkString(" "))

      // 12. Students who scored 100 in either subject, de-duplicated by id.
      // Each input line is assumed to be "id name score" separated by spaces,
      // with the score parsed as Int — TODO confirm against the dataset files.
      val math = sc.textFile("file:///d:/spark/dataset/result_math.txt")
      val m_math = math.map { x => val line = x.split(" "); (line(0), line(1), line(2).toInt) }
      val bigdata = sc.textFile("file:///d:/spark/dataset/result_bigdata.txt")
      val m_bigdata = bigdata.map { x => val line = x.split(" "); (line(0), line(1), line(2).toInt) }
      val student_100 = m_math.filter(_._3 == 100).union(m_bigdata.filter(_._3 == 100))
      val result = student_100.map(_._1).distinct() // de-duplicate ids
      val resultArray = result.collect()            // action: materialize on the driver
      // BUG FIX: the final result was collected but never shown.
      println(resultArray.mkString(" "))
    } finally {
      // BUG FIX: the SparkContext was never stopped, leaking its resources.
      sc.stop()
    }
  }
}