package org.shj.spark.core

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD.rddToPairRDDFunctions

object Transformation {
  /** Entry point: runs every transformation demo against a local Spark context. */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("Transformation").setMaster("local[*]")
    val sc = new SparkContext(conf)
    sc.setLogLevel("WARN")

    aggregateDemo(sc)
    aggregateByKeyDemo(sc)

    cogroupDemo(sc)
    coalesceAndRepartitionDemo(sc)

    filterAndMapDemo(sc)
    flatMapDemo(sc)
    foldDemo(sc)

    groupByKeyDemo(sc)
    intersectionAndJoinAndCartesianDemo(sc)

    minMaxDemo(sc)

    sampleDemo(sc)

    unionDistinctDemo(sc)

    zipDemo(sc)
    sc.stop()
  }
  
  /**
   * Demonstrates `cogroup`: groups the values of two pair RDDs by key.
   * For every key present in either RDD the result holds a pair of
   * Iterables — one per side — which is empty when that side has no
   * record for the key.
   */
  def cogroupDemo(sc: SparkContext): Unit = {
    println(" ======== cogroupDemo ===========")
    val students = Array(("s1", "zhuyin"), ("s2", "xuruyun"), ("s3", "bdyjy"), ("s4", "yangmi"))
    val stu1Rdd = sc.parallelize(students)

    val scores = Array(("s6", 80), ("s2", 90), ("s5", 98), ("s1", 70), ("s3", 60))
    val scoreRdd = sc.parallelize(scores, 2)

    // Print the raw cogroup result: key plus both Iterables.
    stu1Rdd.cogroup(scoreRdd).foreach(item => {
      println("key: " + item._1 + "=== R1: " + item._2._1.mkString(";", ", ", ";") + "===== R2: " + item._2._2.mkString(";", ", ", ";"))
    })

    // Full-outer-join-like projection: substitute a default when one side
    // has no value for the key. (Renamed the inner val so it no longer
    // shadows the source array above.)
    stu1Rdd.cogroup(scoreRdd).map(item => {
      val name = if (item._2._1.isEmpty) "未知" else item._2._1.head
      val scoreValue = if (item._2._2.isEmpty) "0" else item._2._2.head
      (item._1, name, scoreValue)
    }).foreach(println)
  }
  
  /**
   * Demonstrates `zipWithIndex` (pair each element with its index) and
   * `zip` (pair elements of two RDDs positionally).
   */
  def zipDemo(sc: SparkContext): Unit = {
    println(" ======== zipDemo ===========")
    val nums = Array(3, 2, 5, 10)
    val numRdd = sc.parallelize(nums)

    print("numRdd.zipWithIndex(): ")
    numRdd.zipWithIndex().foreach(item => print(item + ", "))
    println()

    val stu1 = Array(("s1", "zhuyin"), ("s2", "xuruyun"), ("s3", "bdyjy"), ("s4", "yangmi"))
    val stu1Rdd = sc.parallelize(stu1)

    print("numRdd.zip(stu1Rdd): ")
    // The two RDDs being zipped must have the same number of partitions AND
    // the same number of records in each partition, otherwise zip throws.
    numRdd.zip(stu1Rdd).foreach(item => print(item + ", "))
    println()
  }
  
  /**
   * Demonstrates finding extremes with `takeOrdered(1)` (smallest element)
   * and `top(1)` (largest element).
   */
  def minMaxDemo(sc: SparkContext): Unit = {
    // Header added for consistency with the other demo methods.
    println(" ======== minMaxDemo ===========")
    val nums = Array(3, 2, 5, 10, 9)
    val numRdd = sc.parallelize(nums, 2)

    // takeOrdered(n) returns the n smallest elements by natural ordering.
    val min = numRdd.takeOrdered(1)
    print("min element: ")
    min.foreach(println)

    // top(n) returns the n largest elements by natural ordering.
    val max = numRdd.top(1)
    print("max element: ")
    max.foreach(println)
  }
  
  /**
   * Demonstrates `coalesce` and `repartition`.
   *
   * Bug fix: the original discarded the returned RDDs, and since both
   * operations are lazy transformations that return a NEW RDD (they never
   * modify `rdd` in place), every statement was a complete no-op. The
   * results are now captured and their partition counts printed, which
   * also forces the point of the demo to be observable.
   */
  def coalesceAndRepartitionDemo(sc: SparkContext): Unit = {
    println(" ======== coalesce ===========")
    val rdd = sc.parallelize(1 to 200, 20)

    // Reduce partition count (no shuffle); typically used after filtering
    // a large dataset down to a small one.
    val fewer = rdd.coalesce(2)
    println("coalesce(2) partitions: " + fewer.getNumPartitions)

    // Increase partition count; this requires shuffle = true, otherwise
    // coalesce cannot grow the number of partitions.
    val more = rdd.coalesce(100, true)
    println("coalesce(100, true) partitions: " + more.getNumPartitions)

    // repartition(n) reshuffles data evenly; equivalent to coalesce(n, true).
    val rebalanced = rdd.repartition(100)
    println("repartition(100) partitions: " + rebalanced.getNumPartitions)
  }
  
  /**
   * Demonstrates `fold`: like `reduce` but with a zero value that is applied
   * once per partition and once more when merging partition results.
   */
  def foldDemo(sc: SparkContext): Unit = {
    println(" ======== foldDemo ===========")
    val nums = Array(3, 2, 5, 10, 9)
    val numRdd = sc.parallelize(nums, 2)
    println("fold(_+_): " + numRdd.fold(0)(_+_)) // --> 29; see aggregateDemo for the per-partition flow
    // Also 29: with 2 partitions, 0-3-2 = -5 and 0-5-10-9 = -24 per partition,
    // then 0-(-5)-(-24) = 29 when merging. The result depends on partitioning.
    println("fold(_-_): " + numRdd.fold(0)(_-_)) // --> 29
  }
  
  /**
   * Demonstrates set-style and join operations on pair RDDs:
   * `intersection`, `subtract`, `join`, `cartesian`, plus the actions
   * `count` and `countByValue`.
   */
  def intersectionAndJoinAndCartesianDemo(sc: SparkContext): Unit = {
    val stu1 = Array(("s1", "zhuyin"), ("s2", "xuruyun"), ("s3", "bdyjy"), ("s4", "yangmi"))
    val stu2 = Array(("s1", "zhuyin"), ("s5", "ydtg"), ("s6", "wtl"))
    val score = Array(("s6", 80), ("s2", 80), ("s5", 98), ("s1", 70), ("s3", 60))

    val stu1Rdd = sc.parallelize(stu1)
    val stu2Rdd = sc.parallelize(stu2)
    val scoreRdd = sc.parallelize(score, 2)

    println(" ======== intersection ===========")
    stu1Rdd.intersection(stu2Rdd).foreach(println) // keeps only records present in both RDDs

    println(" ======== subtract ===========")
    stu1Rdd.subtract(stu2Rdd).foreach(println) // removes records that also appear in stu2Rdd

    println(" ======== join ===========")
    stu1Rdd.join(scoreRdd).foreach(println) // inner join: only keys present in both RDDs

    println(" ======== cartesian ===========")
    stu1Rdd.cartesian(scoreRdd).foreach(println) // cartesian product of the two RDDs

    println("Record number in stu1Rdd: " + stu1Rdd.count())
    // countByValue returns a Map from each distinct record to its occurrence count.
    println("Values number in scoreRdd: " + scoreRdd.countByValue())
  }
  
  /**
   * Demonstrates `union` (concatenation, duplicates kept) followed by
   * `distinct` (deduplication).
   */
  def unionDistinctDemo(sc: SparkContext): Unit = {
    println(" ========= union ==========")
    val stu1 = Array(("s1", "zhuyin"), ("s2", "xuruyun"), ("s3", "bdyjy"), ("s4", "yangmi"))
    val stu2 = Array(("s1", "zhuyin"), ("s5", "ydtg"), ("s6", "wtl"))

    val stu1Rdd = sc.parallelize(stu1)
    val stu2Rdd = sc.parallelize(stu2)

    // union simply concatenates the two RDDs — it does NOT deduplicate.
    val stuRdd = stu1Rdd.union(stu2Rdd)
    stuRdd.foreach(println)

    println(" ========= distinct ==========")
    stuRdd.distinct().foreach(println) // distinct removes duplicate records
  }
  
  /**
   * Demonstrates `aggregate(zeroValue)(seqOp, combOp)` by computing the
   * total and average score of a set of students.
   */
  def aggregateDemo(sc: SparkContext): Unit = {
    /**
     * The seqOp function of the aggregate operation. Per partition:
     * 1. tmpResult (the first parameter, initialized to the aggregate's
     *    zeroValue) is combined with the partition's first record (the
     *    second parameter); the result becomes the new tmpResult.
     * 2. tmpResult is then combined with each remaining record in turn.
     *
     * In this example tmpResult is (total score, student count).
     */
    def seqOp(tmpResult: (Int, Int), item: (String, Int)) : (Int, Int) = {
      println("tmpResult: " + tmpResult + "\titem: " + item)
      (tmpResult._1 + item._2, tmpResult._2 + 1)
    }

    /**
     * The combOp function of the aggregate operation: merges the per-partition
     * results produced by seqOp.
     * Flow:
     * 1. The zeroValue and the first partition's result are combined.
     * 2. That result is combined with the second partition's result.
     * 3. And so on for the remaining partitions.
     */
    def combOp(result1: (Int, Int), result2: (Int, Int)): (Int, Int) = {
      println("result1: " + result1 + "\tresult2: " + result2)
      (result1._1 + result2._1, result1._2 + result2._2)
    }

    println(" ======== aggregateDemo ===========")
    val score = Array(("s6", 80), ("s2", 90), ("s5", 98), ("s1", 70), ("s3", 60))
    val scoreRdd = sc.parallelize(score, 2)

    // zeroValue (0,0): initial total score 0, initial student count 0.
    val totalScoreAndCount = scoreRdd.aggregate((0,0))(seqOp, combOp)
    // Bug fix: the original used integer division, truncating the average
    // (398/5 printed 79 instead of 79.6). Convert to Double first.
    println("总分: " + totalScoreAndCount._1 + ";  平均分: " + (totalScoreAndCount._1.toDouble / totalScoreAndCount._2))
    println(" ======== aggregateDemo ===========")
  }
  
  /**
   * Demonstrates `aggregateByKey` with a word-count over a few sentences.
   */
  def aggregateByKeyDemo(sc: SparkContext): Unit = {
    println(" ======== aggregateByKeyDemo ===========")
    val arr = Array("I love you", "I like you", "I hate her")
    val pair = sc.parallelize(arr).flatMap(_.split("\\s+")).map((_, 1))
    /**
     * aggregateByKey is essentially a generalized reduceByKey (reduceByKey is
     * the simplified form). It takes a logical "zero" value that is reused
     * per key per partition — see the notes in aggregateDemo above.
     * It takes three parameters:
     * 1. the initial value for each key;
     * 2. the seq function, applied map-side before the shuffle;
     * 3. the comb function, applied reduce-side after the shuffle.
     */
    pair.aggregateByKey(0)(_+_, _+_).foreach(println)
    println()
  }
  
  /**
   * Demonstrates chaining `filter` (keep even numbers) and `map` (double them).
   */
  def filterAndMapDemo(sc: SparkContext): Unit = {
    // Header added for consistency with the other demo methods.
    println(" ======== filterAndMapDemo ===========")
    val nums = sc.parallelize(1 to 10)
    val filterRdd = nums.filter(_ % 2 == 0)
    val mapRdd = filterRdd.map(item => item * 2)
    mapRdd.collect().foreach(println)
  }
  
  /**
   * Demonstrates `flatMap`: splits each sentence into words and flattens
   * all the resulting word lists into a single RDD.
   */
  def flatMapDemo(sc: SparkContext): Unit = {
    println(" ======== flatMapDemo ===========")
    val data = Array("Hello Spark", "Hello Hadoop", "Hello Scala")
    val dataRdd = sc.parallelize(data)

    // flatMap splits each record, then merges all pieces into one flat RDD.
    val flatRdd = dataRdd.flatMap(_.split(" "))
    flatRdd.collect().foreach(println)
  }
  
  /**
   * Demonstrates `groupByKey`: collects all values sharing a key into one
   * Iterable per key.
   */
  def groupByKeyDemo(sc: SparkContext): Unit = {
    println(" ======== groupByKeyDemo ===========")
    val tupleData = Array((20, "Spark"), (30, "Hadoop"), (20, "Scala"))
    val groupRdd = sc.parallelize(tupleData).groupByKey()
    groupRdd.collect().foreach(println)
  }
  
  /**
   * Demonstrates `sample`: draws a random subset of the RDD without
   * replacement (first arg false), keeping roughly 50% of the records
   * (fraction 0.5), seeded with the current time.
   */
  def sampleDemo(sc: SparkContext): Unit = {
    println(" ======== sampleDemo ===========")
    val data = Array("zhuyin01", "zhuyin02", "zhuyin03", "zhuyin04", "zhuyin05", "zhuyin06",
      "zhuyin07", "zhuyin08", "zhuyin09", "zhuyin10", "zhuyin11", "zhuyin12")

    val dataRdd = sc.parallelize(data, 2)

    dataRdd.sample(false, 0.5, System.currentTimeMillis()).foreach(println)
  }
}