package com.atbeijing.bigdata.spark.core.rdd.operator.transform

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Spark20_Oper_Transform {

    /**
     * Demonstrates the `sortByKey` transformation on key-value RDDs,
     * first with Int keys and then with a custom key type that supplies
     * its own ordering via the `Ordered` trait.
     */
    def main(args: Array[String]): Unit = {

        val conf = new SparkConf().setMaster("local[*]").setAppName("TransformOperator")
        val sc = new SparkContext(conf)

        // TODO operator - transform - KV - sortByKey (6 / 10)
        val rdd = sc.makeRDD(
            List(
                (2,1),(3,1),(1,1), (1,3),(1,2)
            ),2
        )

        // sortByKey sorts the pairs by KEY only. Two readings one might expect:
        //   1. (a,1)(c,2)(b,3) => (a,1)(b,3)(c,2)   -- tuples ordered by key
        //   2. (a,3)(a,1)(a,2) => (a,1)(a,2)(a,3)   -- values sorted within a key
        // Only interpretation 1 holds: values are NOT sorted.
        // `ascending = false` requests descending key order.
        val value: RDD[(Int, Int)] = rdd.sortByKey(ascending = false)
        value.collect().foreach(println)

        // A custom key type must provide an ordering (here via `Ordered`)
        // AND be Serializable, because sortByKey shuffles keys across
        // partitions and the range partitioner samples/serializes them.
        val rdd1 = sc.makeRDD(
            List(
                (new User(), 1), (new User(),2), (new User(),3)
            )
        )
        // Materialize and print the result so the custom ordering is
        // actually exercised (the original discarded this RDD unused).
        rdd1.sortByKey().collect().foreach(println)

        sc.stop()

    }

    // Serializable is required: without it, using User as a sortByKey key
    // fails with NotSerializableException during the shuffle.
    class User extends Ordered[User] with Serializable {
        // NOTE(review): always returning -1 violates the Ordered contract
        // (compare(a, b) < 0 must imply compare(b, a) > 0), so the resulting
        // order is arbitrary. Acceptable for a demo of the mechanism, but a
        // real key type should compare actual fields.
        override def compare(that: User): Int = {
            -1
        }
    }
}
