package com.spark.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * An alternative implementation of word count.
 */
object Demo1WordCount2 {

  /**
   * Entry point. Demonstrates two word-count pipelines:
   * one chained pipeline over an in-memory collection, and one
   * step-by-step pipeline over a text file, printed in sorted order.
   *
   * @param args command-line arguments (unused)
   */
  def main(args: Array[String]): Unit = {
    // Local-mode context for the demo; single local thread.
    val conf = new SparkConf()
    conf.setMaster("local").setAppName("wordcount")
    val sc = new SparkContext(conf)
    sc.setLogLevel("error")

    try {
      // val lines: RDD[String] = sc.textFile("data/words.txt")
      val lines: RDD[String] = sc.textFile("/data/words.txt")
      /* Expected file content:
         java,spark,hadoop
         java,spark
         java,spark,hadoop,spark */

      // One-liner word count over an in-memory collection split into 2 partitions.
      sc.parallelize(Array("java,spark,hadoop", "java,spark", "java,spark,hadoop,spark"), 2)
        .flatMap(_.split(","))
        .map((_, 1))
        .reduceByKey(_ + _)
        .foreach(println)

      // Print the partition count of the file-backed RDD.
      println(lines.getNumPartitions)

      // Split each comma-separated line into individual words.
      val words: RDD[String] = lines.flatMap(line => line.split(","))

      // Pair each word with an initial count of 1.
      val pairWord: RDD[(String, Int)] = words.map(word => (word, 1))

      // Sum the counts per word.
      val result: RDD[(String, Int)] = pairWord.reduceByKey((x: Int, y: Int) => x + y)

      // BUG FIX: sortBy returns a NEW RDD (RDDs are immutable); the original
      // discarded it and printed the unsorted result. Capture and print the
      // sorted RDD instead. Named parameter clarifies the boolean literal.
      val sorted: RDD[(String, Int)] = result.sortBy(kv => kv._1, ascending = false)
      sorted.foreach(println)
    } finally {
      // BUG FIX: the original never released the context; stop it so the
      // application shuts down cleanly even if a job above fails.
      sc.stop()
    }
  }
}
