package com.spark.test

import org.apache.spark.rdd.MapPartitionsRDD
import org.apache.spark.{SparkConf, SparkContext}

object WorkCount {

  /**
   * Word-count demo showing three equivalent RDD pipelines.
   *
   * @param args optional; args(0) overrides the input file path
   *             (defaults to the original hard-coded path for
   *             backward compatibility).
   */
  def main(args: Array[String]): Unit = {

    val conf: SparkConf = new SparkConf().setAppName("workCount").setMaster("local[2]")
    val sc: SparkContext = SparkContext.getOrCreate(conf)

    // Allow the input path to be passed on the command line; fall back
    // to the original default so existing invocations keep working.
    val inputPath = args.headOption.getOrElse("/Users/hecj/Desktop/test.txt")

    // Ensure the SparkContext is stopped even if a job fails,
    // releasing the local executor threads and UI port.
    try {
      val lineRdd = sc.textFile(inputPath)

      // .collect() is the action that triggers execution.
      // Variant 1: plain word count.
      val list = lineRdd.flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _).collect()

      // Variant 2: same reduction with an explicit function,
      // sorted alphabetically by word.
      val list2 = lineRdd.flatMap(_.split(" ")).map((_, 1)).reduceByKey(
        (x, y) => {
          // println(x + "," + y)
          x + y
        }
      ).sortBy(_._1).collect()

      // Variant 3: sorted by count, descending (negated key).
      val list3 = lineRdd.flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _).sortBy(-_._2).collect()
      list3.foreach(println)
    } finally {
      sc.stop()
    }
  }
}
