package com.doit.day02

import com.doit.beans.OrdersBean
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @Author:
 * @WX: 17710299606
 * @Tips: 学大数据 ,到多易教育
 * @DOC: https://blog.csdn.net/qq_37933018?spm=1000.2115.3001.5343
 * @Description:
 */
object Demo03Sort {

  /**
   * Demo of sorting RDDs with `sortBy` on several element types
   * (Int, String, case-class beans, key/value pairs).
   *
   * NOTE: Spark transformations are lazy — a `sortBy` whose result is never
   * consumed by an action does nothing. The original code discarded two
   * `sortBy` results, making them silent no-ops; they are now bound to vals
   * so the intent is visible (they still only execute if an action is added).
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setMaster("local[*]")
      .setAppName(this.getClass.getSimpleName)
    val sc = SparkContext.getOrCreate(conf)

    val ls = List[Int](1, 8, 7, 4, 5, 6, 2, 3, 9)
    val arr = Array("java", "abc", "scala", "hive")
    val seq = Seq[OrdersBean](
      OrdersBean("oid01", 100D, "bj", "A"),
      OrdersBean("oid02", 300D, "bj", "A"),
      OrdersBean("oid03", 900D, "bj", "A"),
      OrdersBean("oid04", 10D, "bj", "A")
    )

    val mp = Map[String, Int](("zss", 23), ("pjl", 33), ("lny", 21))

    // 2 partitions
    val rdd1: RDD[Int] = sc.parallelize(ls, 2)
    // val res: RDD[(Int, Int)] = rdd1.keyBy(e => e)

    val rdd2: RDD[String] = sc.parallelize(arr, 2)
    val rdd3: RDD[OrdersBean] = sc.parallelize(seq, 2)

    // Bind the sorted RDDs — previously the results were discarded, and since
    // sortBy is a lazy transformation those calls executed nothing at all.
    val sortedByMoney: RDD[OrdersBean] = rdd3.sortBy(_.money)
    val sortedByOid: RDD[OrdersBean] = rdd3.sortBy(_.oid)

    // rdd3.sortBy(bean => bean)  // would require an implicit Ordering[OrdersBean]

    val rdd4: RDD[(String, Int)] = sc.parallelize(mp.toList, 2)

    // Descending sort; the result keeps the parent RDD's partition count (2).
    // Named boolean arg instead of a bare `false` for readability.
    val sortedRDD: RDD[Int] = rdd1.sortBy(e => e, ascending = false)
    sortedRDD /*.repartition(3)*/
      .saveAsTextFile("data/sorted/") // NOTE: fails if the output dir already exists
    /*    rdd4.sortBy(_._1)
     rdd4.sortByKey()*/
    sc.stop()
  }

}
