package RDD

import org.apache.spark.{SparkConf, SparkContext}
import org.junit.Test

class 转换算子 {
  // Shared Spark setup: local mode with 6 worker threads.
  // NOTE(review): JUnit 4 instantiates the class once per test method, so each
  // test gets its own SparkContext; no test should call sc.stop() (doing so in
  // only one test was inconsistent with the rest, and would break test reuse).
  val conf = new SparkConf()
    .setMaster("local[6]")
    .setAppName("Create_RDD")
  val sc = new SparkContext(conf)

  /** map: multiply every element by 10 and print the collected results. */
  @Test
  def mapTest(): Unit = {
    // Create the source RDD
    val rdd1 = sc.parallelize(Seq(1, 2, 3))
    // Transform: scale each element by 10.
    // (The original wrapped rdd1.foreach(...) in println, which only printed
    // the Unit value "()" on the driver — removed.)
    rdd1.map(item => item * 10)
      .collect()
      .foreach(println(_))
  }

  /** flatMap: split each sentence on spaces into individual words. */
  @Test
  def flatMapTest(): Unit = {
    // Create the source RDD
    val rdd1 = sc.parallelize(Seq("hello world", "hi Jack"))
    // Transform: split on whitespace and flatten
    rdd1.flatMap(_.split(" "))
      .collect()
      .foreach(println(_))
  }

  /** reduceByKey: classic word count — split, pair with 1, sum per key. */
  @Test
  def ReduceByKeyTest(): Unit = {
    // Create the source RDD
    val rdd1 = sc.parallelize(Seq("hello world", "hi world", "hello Rom"))
    // Process: word -> (word, 1) -> summed counts per word
    rdd1.flatMap(_.split(" "))
      .map(item => (item, 1))
      .reduceByKey((curr, agg) => curr + agg)
      .collect()
      .foreach(println(_))
    // sc.stop() removed: the context is per-test-instance state shared with
    // every other test method's setup; stopping it here was inconsistent.
  }

  /** mapPartitions: apply the transformation one whole partition at a time. */
  @Test
  def mapPartitions(): Unit = {
    // Create RDD with 2 partitions, scale each element by 10, print results
    sc.parallelize(Seq(1, 2, 3, 4, 5, 6), 2)
      .mapPartitions(iter => iter.map(item => item * 10))
      .collect()
      .foreach(println(_))
  }

  /** mapPartitionsWithIndex: tag each element with its partition index. */
  @Test
  def mapPartitionsWithIndex(): Unit = {
    sc.parallelize(Seq(1, 2, 3, 4, 5, 6), 2)
      .mapPartitionsWithIndex((index, iter) => {
        // A Scala Iterator can only be traversed once. The original consumed
        // `iter` with foreach (printing on the executors) and then returned
        // the exhausted iterator, so the resulting RDD was always empty.
        // Map lazily instead, carrying the partition index with each value.
        iter.map(item => "index:" + index + " value:" + item)
      })
      .collect()
      .foreach(println(_))
  }

  /** filter: keep only the even numbers. */
  @Test
  def filter(): Unit = {
    sc.parallelize(Seq(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))
      .filter(item => item % 2 == 0)
      .collect()
      .foreach(println(_))
  }

  /** sample: random sample without replacement, fraction 0.5, seed 1. */
  @Test
  def sample(): Unit = {
    sc.parallelize(Seq(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))
      // withReplacement = false, fraction = 0.5, seed = 1 (deterministic)
      .sample(false, 0.5, 1)
      .collect()
      .foreach(println(_))
  }

  /** mapValues: transform only the values of a pair RDD, keys untouched. */
  @Test
  def mapValues(): Unit = {
    sc.parallelize(Seq(("a", 1), ("b", 2), ("c", 3)))
      .mapValues(item => item * 10)
      .collect()
      .foreach(println(_))
  }

  /** intersection: elements present in both RDDs. */
  @Test
  def intersection(): Unit = {
    val rdd1 = sc.parallelize(Seq(1, 2, 3, 4, 5))
    val rdd2 = sc.parallelize(Seq(3, 4, 5, 6, 7))
    rdd1.intersection(rdd2).collect().foreach(println(_))
  }

  /** union: all elements of both RDDs (duplicates kept). */
  @Test
  def union(): Unit = {
    val rdd1 = sc.parallelize(Seq(1, 2, 3, 4, 5))
    val rdd2 = sc.parallelize(Seq(3, 4, 5, 6, 7))
    rdd1.union(rdd2).collect().foreach(println(_))
  }

  /** subtract: elements of rdd1 that are NOT in rdd2. */
  @Test
  def subtract(): Unit = {
    val rdd1 = sc.parallelize(Seq(1, 2, 3, 4, 5))
    val rdd2 = sc.parallelize(Seq(3, 4, 5, 6, 7))
    rdd1.subtract(rdd2).collect().foreach(println(_))
  }

  /** groupByKey: collect all values of each key into one iterable. */
  @Test
  def groupByKey(): Unit = {
    sc.parallelize(Seq(("a", 1), ("c", 5), ("d", 8), ("c", 1), ("a", 5), ("d", 3), ("b", 2)))
      .groupByKey()
      .collect()
      .foreach(println(_))
  }

  /**
   * combineByKey: compute the average score per student.
   * createCombiner runs once on the first value of each key per partition;
   * mergeValue folds further values within a partition; mergeCombiners
   * merges the (sum, count) pairs across partitions.
   */
  @Test
  def combineByKey(): Unit = {
    sc.parallelize(Seq(
      ("zhangsan", 99.0),
      ("zhangsan", 96.0),
      ("lisi", 97.0),
      ("lisi", 98.0),
      ("zhangsan", 97.0)
    ))
      .combineByKey(
        createCombiner = (curr: Double) => (curr, 1),
        mergeValue = (curr: (Double, Int), nextValue: Double) => (curr._1 + nextValue, curr._2 + 1),
        mergeCombiners = (curr: (Double, Int), agg: (Double, Int)) => (curr._1 + agg._1, curr._2 + agg._2)
      )
      // (sum, count) -> average. The original never consumed the result,
      // so the job was never even triggered; collect + print added.
      .map { case (name, (sum, count)) => (name, sum / count) }
      .collect()
      .foreach(println(_))
  }

  /** foldByKey: reduceByKey with a per-key, per-partition initial value (10). */
  @Test
  def foldByKey(): Unit = {
    sc.parallelize(Seq(("a", 1), ("b", 2), ("a", 3)))
      .foldByKey(10)((curr, agg) => curr + agg)
      .collect()
      .foreach(println(_))
  }

  /**
   * aggregateByKey: total discounted (x0.8) price per product.
   * BUG FIX: the original used `aggregateByKey(0.8)((zeroValue, item) =>
   * item * zeroValue, ...)`, which multiplies each new element by the RUNNING
   * ACCUMULATOR — correct only while a key has at most one value per
   * partition (e.g. ("手机", 20) after ("手机", 10) would give 20 * 8.0 = 160
   * instead of 8 + 16 = 24). Accumulate discounted prices additively instead.
   */
  @Test
  def aggregateByKey(): Unit = {
    sc.parallelize(Seq(("手机", 10), ("电脑", 15), ("手机", 20)))
      .aggregateByKey(0.0)((acc, price) => acc + price * 0.8, (curr, agg) => curr + agg)
      .collect()
      .foreach(println(_))
  }

  /** join: inner join on key — yields every pairing of matching values. */
  @Test
  def join(): Unit = {
    val rdd1 = sc.parallelize(Seq(("a", 1), ("a", 2), ("b", 1)))
    val rdd2 = sc.parallelize(Seq(("a", 10), ("a", 11), ("a", 12)))
    rdd1.join(rdd2)
      .collect()
      .foreach(println(_))
  }

  /** sortBy: sort elements ascending by themselves. */
  @Test
  def sortby(): Unit = {
    sc.parallelize(Seq(2, 3, 6, 7, 2, 4, 5, 6, 3, 24, 9))
      .sortBy(item => item)
      .collect()
      .foreach(println(_))
  }

  /** sortByKey: sort a pair RDD by key (ascending by default). */
  @Test
  def sortByKey(): Unit = {
    sc.parallelize(Seq(("a", 1), ("a", 2), ("b", 1), ("b", 2), ("b", 5)))
      .sortByKey()
      .collect()
      .foreach(println(_))
  }
}
