package com.cqk.bigdata.spark

import org.apache.spark._


/**
  * Secondary sort: group tuples by their first field, then sort each
  * group's (secondary key, value) pairs in ascending order of the
  * secondary key.
  *
  * @author cqk
  */
object SecondarySort {

  /**
    * Entry point: builds a local SparkContext, runs the secondary sort
    * over a small in-memory data set and prints one line per group.
    *
    * @param args command-line arguments (unused)
    */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setAppName("SecondarySort").setMaster("local[*]")
    val sc = new SparkContext(conf)

    val data = List(
      ("x", 2, 9), ("y", 2, 5), ("x", 1, 3),
      ("y", 1, 7), ("y", 3, 1), ("x", 3, 6),
      ("z", 1, 4), ("z", 2, 8), ("z", 3, 7),
      ("z", 4, 0), ("p", 2, 6), ("p", 4, 7),
      ("p", 1, 9), ("p", 6, 0), ("p", 3, 7)
    )

    // Key each record by its first field, keeping (secondary key, value) as
    // the payload; the fields are already String/Int, so no conversion is
    // needed. After grouping, sort each group ASCENDING by the secondary key
    // so the output matches the expected result, e.g.:
    //   (p,List((1,9), (2,6), (3,7), (4,7), (6,0)))
    //   (x,List((1,3), (2,9), (3,6)))
    //   (y,List((1,7), (2,5), (3,1)))
    //   (z,List((1,4), (2,8), (3,7), (4,0)))
    // NOTE: the original used `sortWith(_._1 > _._1)` (descending), which
    // contradicted that documented expectation; `sortBy(_._1)` fixes it.
    sc.parallelize(data)
      .map { case (key, k2, v) => (key, (k2, v)) }
      .groupByKey()
      .map { case (key, vs) => (key, vs.toList.sortBy(_._1)) }
      .collect()
      .foreach(println)

    // Shut the context down so the local JVM exits cleanly.
    sc.stop()
  }
}
