package cn.spark.study.core

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext

object TransformationOperation {
  def main(args: Array[String]): Unit = {
    val sc = getSC()
    // Uncomment exactly one of the calls below to run a different demo:
    // map(sc)
    // filter(sc)
    // flatMap(sc)
    // groupBykey(sc)
    // reduceByKey(sc)
    // sorteByKey(sc)
    joinStudent(sc)
  }
  
  
  /*
   * Example 7: join
   *
   * Joins an (id, score) RDD with an (id, name) RDD on the id key and
   * prints id, name, and score for every matched pair.
   * The joined element shape is (id, (score, name)).
   */
  def joinStudent(sc: SparkContext): Unit = {
    // (student id, score)
    val idScores = Array((1, 30), (2, 40), (3, 50))

    // (student id, name)
    val idNames = Array((1, "kevin"), (2, "Jack"), (3, "Marray"))

    val scoreRDD = sc.parallelize(idScores, 1)
    val nameRDD = sc.parallelize(idNames, 1)

    // Destructure each joined element instead of chaining pair._2._2 accessors.
    scoreRDD.join(nameRDD).foreach { case (id, (score, name)) =>
      println(s"id is $id")
      println(s"name is $name")
      println(s"score is $score")
      println("=======================")
    }
  }
  
  /*
   * Example 6: sortByKey
   *
   * Sorts (name, score) pairs ascending by key (the name) into a single
   * partition and prints each pair.
   *
   * NOTE: the printed message reads "<key> name is <value>", i.e. the name
   * comes first and the score after "name is" — kept as-is for output
   * compatibility. To sort by score instead, swap key and value around the
   * sort: .map(_.swap).sortByKey(false, 1).map(_.swap)
   */
  def sorteByKey(sc: SparkContext): Unit = {
    val scores = Array(
      ("kevin", 100),
      ("Jack", 20),
      ("Marry", 90)
    )

    sc.parallelize(scores, 1)
      .sortByKey(true, 1)
      .foreach(pair => println(s"${pair._1} name is ${pair._2}"))
  }
  
  /*
   * Example 5: reduceByKey
   *
   * Sums the scores per class key and prints one total line per class.
   */
  def reduceByKey(sc: SparkContext): Unit = {
    val classScores = Array(
      ("class1", 90),
      ("class1", 40),
      ("class2", 50),
      ("class1", 20),
      ("class2", 30)
    )

    sc.parallelize(classScores, 1)
      .reduceByKey(_ + _) // merge values of the same key by addition
      .foreach { case (cls, total) => println(s"class $cls score is $total") }
  }
  
  /*
   * Example 4: groupByKey
   *
   * Groups scores by class key, then prints the class name, each of its
   * scores on its own line, and a separator.
   */
  def groupBykey(sc: SparkContext): Unit = {
    val classScores = Array(
      ("class1", 90),
      ("class1", 40),
      ("class2", 50),
      ("class1", 20),
      ("class2", 30)
    )

    sc.parallelize(classScores, 1).groupByKey.foreach { case (cls, scores) =>
      println(cls)
      scores.foreach(println)
      println("=============")
    }
  }
  /*
   * Example 3: flatMap
   *
   * Splits each sentence into words and prints every word on its own line.
   */
  def flatMap(sc: SparkContext) {
    val lines = Array("hello you", "hello me", "hello world")
    val words = sc.parallelize(lines, 1).flatMap(_.split(" "))
    words.foreach(println)
  }
  
  /*
   * Example 1: map operator
   *
   * Doubles every number in the input and prints each result.
   */
  def map(sc: SparkContext) {
    val numbers = Array(1, 2, 3, 4, 5)
    val doubled = sc.parallelize(numbers, 2).map(_ * 2)
    doubled.foreach(println)
  }
  
  /*
   * Example 2: filter operator
   *
   * Keeps only the even numbers from 1..10 and prints each one.
   */
  def filter(sc: SparkContext) {
    val numbers = Array(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
    sc.parallelize(numbers, 1)
      .filter(_ % 2 == 0)
      .foreach(println)
  }
  
  // Method: build a local-mode SparkContext for the demos.
  def getSC(): SparkContext = {
    val conf = new SparkConf()
      .setAppName("TransformationOperation")
      .setMaster("local")

    // The last expression is the return value; an explicit `return` is
    // discouraged in Scala.
    new SparkContext(conf)
  }
  
  // Method: print results (placeholder — not implemented)
}