package day02.operator.transformation

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @Author wsl
 * @Description Examples of value-type and partition-related RDD
 *              transformations: sample, distinct, coalesce, repartition,
 *              groupBy, and sortBy.
 */
object Value_Partition {

  /**
   * Demonstrates value-type and partition-related RDD transformations:
   * sample, distinct, coalesce, repartition, groupBy, and sortBy.
   * Runs locally and prints each result to stdout.
   */
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setAppName("rdd").setMaster("local[*]")
    val sc = new SparkContext(conf)

    // Source data: 9 ints (with duplicates) spread across 3 partitions.
    val rdd: RDD[Int] = sc.makeRDD(Array(1, 2, 3, 4, 5, 1, 2, 4, 7), 3)

    // sample: Bernoulli sampling without replacement; each element is kept
    // with probability ~0.4. Fixed seed 1 makes the output reproducible.
    rdd
      .sample(withReplacement = false, fraction = 0.4, seed = 1)
      .collect().foreach(println)

    // distinct: removes duplicates (triggers a shuffle). The partition index
    // is attached to each value to show where it landed after the shuffle.
    rdd
      .distinct()
      .mapPartitionsWithIndex((index, iters) => iters.map((index, _)))
      .collect().foreach(println)

    // coalesce: reduces 3 partitions to 2. The shuffle flag defaults to false
    // (partitions are merged without redistributing data), but here
    // shuffle = true is passed explicitly, so the data IS reshuffled.
    rdd
      .coalesce(2, shuffle = true)
      .mapPartitionsWithIndex((index, iters) => iters.map((index, _)))
      .collect().foreach(println)

    // repartition(n) is equivalent to coalesce(n, shuffle = true); unlike a
    // no-shuffle coalesce it can both increase and decrease partition count.
    rdd
      .repartition(2)
      .mapPartitionsWithIndex((index, iters) => iters.map((index, _)))
      .collect().foreach(println)

    // groupBy: groups values by parity (key 0 = even, key 1 = odd).
    rdd
      .groupBy(_ % 2)
      .collect().foreach(println)

    // groupBy with a custom key: values > 2 go to group 1, the rest to group 0.
    rdd
      .groupBy(int => if (int > 2) 1 else 0)
      .collect().foreach(println)

    //Thread.sleep(100000) // keep the app alive to inspect the DAG at http://localhost:4040

    // sortBy: ascending, then descending (via key negation).
    rdd.sortBy(num => num).collect().foreach(println)
    rdd.sortBy(num => -num).collect().foreach(println)

    // Sorting strings by their numeric value rather than lexicographically.
    sc.makeRDD(List("44", "22", "33"))
      .sortBy(word => word.toInt)
      .collect().foreach(println)

    // Tuples sort by their implicit Ordering: first element, then second.
    sc.makeRDD(List((5, 7), (3, 4), (5, 2)))
      .sortBy(t => t)
      .collect().foreach(println)

    sc.stop()
  }
}
