package Demo2

import org.apache.spark.{SparkConf, SparkContext}

/**
  * Created by lenovo on 2017/10/10.
  * Sort values within each group (组内排序).
  */
object PartionSortTop3 {

  /**
    * Entry point: reads tab-separated `key\tvalue` pairs from a local text
    * file, groups the values by key, sorts each group's values ascending
    * (lexicographically — values are Strings), and writes the result to disk
    * as a single output file.
    *
    * NOTE(review): the object name suggests only the top 3 values per group
    * should be kept (e.g. a `.take(3)` after the sort), but the original
    * code writes the full sorted list for each key — confirm intent before
    * changing.
    */
  def main(args: Array[String]) {
    System.setProperty("hadoop.home.dir","E://hadoop-liyadong//hadoop-2.7.1")
    val conf = new SparkConf().setMaster("local[2]").setAppName("PartionSortTop3").set("spark.testing.memory","2147480000")
    val sc = new SparkContext(conf)

    // Parse each line as "key\tvalue". Lines that do not split into at
    // least two fields are dropped instead of crashing the job with an
    // ArrayIndexOutOfBoundsException on fields(1).
    val rddFile = sc.textFile("F://paixu.txt")
      .map(_.split("\t"))
      .filter(_.length >= 2)
      .map(fields => (fields(0), fields(1)))

    // Debug output. With a non-local master this would print on the
    // executors rather than the driver; acceptable here since the master
    // is local[2].
    rddFile.foreach(str => {
      println(str._1 + "*" + str._2)
    })

    // Group values by key, then sort each group's value list ascending.
    // `mapValues` (not `map`) is used so the partitioner established by
    // groupByKey is preserved.
    val sortRdd = rddFile.groupByKey().mapValues(_.toList.sortWith(_ < _))

    // Collapse to one partition so the output directory holds a single
    // part file.
    sortRdd.repartition(1).saveAsTextFile("F://paixu3")

    // Release the SparkContext's resources explicitly — the original
    // leaked it (the JVM exit hid this in local mode).
    sc.stop()
  }
}
