package spark.core.scala

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkContext, SparkConf}

/**
 * Created by ezheng on 2018/2/9.
 */
object TransformationOperation {

  /**
   * Entry point: runs the cogroup demo. Uncomment other calls to run
   * the corresponding transformation demo instead.
   *
   * Fix: procedure syntax (`def main(...) { }`) is deprecated — declare
   * the `Unit` result type explicitly.
   */
  def main(args: Array[String]): Unit = {
    //    map()
    cogroup()
  }

  /**
   * Demonstrates the RDD `map` transformation: doubles every element
   * and prints each result.
   *
   * Fix: the original mapped over the local Array directly, so the
   * SparkContext was created but never used and no Spark job ran.
   * Parallelize the collection first so `map` is an actual RDD operation.
   */
  def map(): Unit = {
    val conf = new SparkConf()
      .setMaster("local")
      .setAppName("map")
    val sc = new SparkContext(conf)
    val list = Array(1, 2, 3, 4, 5)
    // Distribute the data, then apply the transformation on the RDD.
    val listRDD: RDD[Int] = sc.parallelize(list).map(x => x * 2)
    listRDD.foreach(println)
    sc.stop() // release local Spark resources
  }

  /**
   * Demonstrates the RDD `filter` transformation: keeps only the even
   * numbers from 1..10 and prints them.
   *
   * Fixes: the original body was a copy of `map` — it ran
   * `map(x => x * 2)` on the local Array, never used the SparkContext,
   * and did not filter anything. Parallelize and apply a real predicate.
   */
  def filter(): Unit = {
    val conf = new SparkConf()
      .setMaster("local")
      .setAppName("filter")
    val sc = new SparkContext(conf)
    val list = Array(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
    // Keep only even elements — an actual filter, on an actual RDD.
    val listRDD: RDD[Int] = sc.parallelize(list).filter(x => x % 2 == 0)
    listRDD.foreach(println)
    sc.stop() // release local Spark resources
  }

  /**
   * Demonstrates the RDD `flatMap` transformation: splits each line on
   * spaces and prints every resulting word.
   *
   * Fix: the original flatMapped the local Array directly, so the
   * SparkContext was created but unused. Parallelize first so the
   * transformation runs as an RDD operation.
   */
  def flatMap(): Unit = {
    val conf = new SparkConf()
      .setMaster("local")
      .setAppName("flatMap")
    val sc = new SparkContext(conf)
    val list = Array("hello you", "hello me", "hello world")
    // One input line fans out to many words.
    val listRDD: RDD[String] = sc.parallelize(list).flatMap(x => x.split(" "))
    listRDD.foreach(println)
    sc.stop() // release local Spark resources
  }


  /**
   * Demonstrates the RDD `groupByKey` transformation: collects all
   * scores per class name and prints the key followed by each score.
   */
  def groupByKey(): Unit = {
    val conf = new SparkConf()
      .setMaster("local")
      .setAppName("groupByKey")
    val sc = new SparkContext(conf)
    val scores = Array(
      ("class1", 80),
      ("class1", 75),
      ("class2", 65),
      ("class1", 77))
    val grouped: RDD[(String, Iterable[Int])] = sc.parallelize(scores).groupByKey()

    // Print each class name, then every score that belongs to it.
    grouped.foreach { pair =>
      println(pair._1)
      pair._2.foreach(println)
    }
  }

  /**
   * Demonstrates the RDD `reduceByKey` transformation: sums the scores
   * per class name and prints one total per class.
   */
  def reduceByKey(): Unit = {
    val conf = new SparkConf()
      .setMaster("local")
      .setAppName("reduceByKey")
    val sc = new SparkContext(conf)
    val scores = Array(
      ("class1", 80),
      ("class1", 75),
      ("class2", 65),
      ("class1", 77))
    val totals: RDD[(String, Int)] = sc.parallelize(scores).reduceByKey((a, b) => a + b)

    totals.foreach { case (clazz, total) =>
      println(clazz + " total scores is " + total)
    }
  }

  /**
   * Demonstrates the RDD `sortByKey` transformation: orders the
   * (score, class) pairs by score ascending and prints each pair.
   */
  def sortByKey(): Unit = {
    val conf = new SparkConf()
      .setMaster("local")
      .setAppName("sortByKey")
    val sc = new SparkContext(conf)
    val scores = Array(
      (80, "class1"),
      (75, "class1"),
      (65, "class2"),
      (77, "class1"))
    // ascending = true
    val sorted: RDD[(Int, String)] = sc.parallelize(scores).sortByKey(true)

    sorted.foreach { case (score, clazz) =>
      println(score + " total scores is " + clazz)
    }
  }


  /**
   * Demonstrates the RDD `join` transformation: pairs each student id
   * with (name, score) and prints the joined rows ordered by id.
   */
  def join(): Unit = {
    val conf = new SparkConf()
      .setMaster("local")
      .setAppName("join")
    val sc = new SparkContext(conf)
    val names = Array(
      (1, "Eric"),
      (2, "Tom"),
      (3, "Pen"),
      (4, "Lucy"))
    val scores = Array(
      (1, 85),
      (2, 88),
      (3, 88),
      (4, 55))
    val nameRDD = sc.parallelize(names)
    val scoreRDD = sc.parallelize(scores)
    // Inner join on the student id key.
    val joined: RDD[(Int, (String, Int))] = nameRDD.join(scoreRDD)
    joined.sortByKey(true).foreach { case (id, (name, score)) =>
      println("NO." + id + ",name:" + name + " score :" + score)
    }
  }




  /**
   * Demonstrates the RDD `cogroup` transformation: for each student id,
   * gathers all names from one RDD and all scores from the other, then
   * prints the id with both groups, ordered by id.
   */
  def cogroup(): Unit = {
    val conf = new SparkConf()
      .setMaster("local")
      .setAppName("cogroup")
    val sc = new SparkContext(conf)
    val names = Array(
      (1, "Eric"),
      (2, "Tom"),
      (3, "Pen"),
      (4, "Lucy"))
    val scores = Array(
      (1, 85),
      (2, 88),
      (3, 88),
      (4, 55),
      (1, 68),
      (2, 68),
      (3, 65),
      (4, 85))
    val nameRDD = sc.parallelize(names)
    val scoreRDD = sc.parallelize(scores)
    // Unlike join, cogroup keeps ALL values per key from both sides.
    val cogrouped: RDD[(Int, (Iterable[String], Iterable[Int]))] = nameRDD.cogroup(scoreRDD)
    cogrouped.sortByKey(true).foreach { case (id, (nameGroup, scoreGroup)) =>
      println("id:" + id)
      println("name:" + nameGroup)
      println("score:" + scoreGroup)
    }
  }


}
