package com.study.spark.scala.rdd

import org.apache.spark.{SparkConf, SparkContext}

/**
  * Sorting demo: records are parsed into tuples and sorted with a custom
  * tuple [[scala.math.Ordering]] supplied implicitly to `RDD.sortBy`.
  *
  * @author stephen
  * @create 2019-03-15 11:48
  * @since 1.0.0
  */
object OrderDemo_Tuple3 {

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setAppName("Order Tuple3 Demo")
      .setMaster("local[*]")
    val sc = new SparkContext(conf)

    // Each record is "name age score", space-separated.
    val persons = List("zhangsan 25 90","lisi 20 98","wangwu 30 80","zhaoliu 25 98")
    val rdd = sc.parallelize(persons)

    // Custom ordering picked up implicitly by sortBy below:
    //   Ordering[(Int, Int)]     — the shape actually compared
    //   on[(String, Int, Int)]   — the shape of the RDD elements
    //   t => (-t._3, t._2)       — sort by score descending, then age ascending
    // Explicit type annotation: implicits without one are a warning in 2.13
    // and an error in Scala 3.
    implicit val rules: Ordering[(String, Int, Int)] =
      Ordering[(Int, Int)].on[(String, Int, Int)](t => (-t._3, t._2))

    val result = rdd.map { line =>
      val fields = line.split(" ")
      (fields(0), fields(1).toInt, fields(2).toInt)
    }
      // sortBy takes an implicit Ordering[K]; the local `rules` above takes
      // priority over the default Tuple3 ordering, so this sorts by
      // (-score, age), NOT by plain field order.
      .sortBy(tp => tp)
      .collect()

    println(result.toBuffer)

    // Release the SparkContext so the local app shuts down cleanly
    // (was missing in the original).
    sc.stop()
  }
}

