import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @program: day0304_3
 * @description: Spark word-count example ("Don't take everything in books at face value")
 * @author: CoreDao
 * @create: 2021-03-04 21:08
 * */
/**
 * Requires a Hadoop environment configured on the Windows host
 * (e.g. HADOOP_HOME with winutils.exe) for local file access.
 */
/**
 * Classic Spark word count over a local text file.
 *
 * Reads the input file, splits each line on single spaces, counts the
 * occurrences of each word, and prints the results twice: first sorted by
 * count in descending order, then in unspecified partition order.
 *
 * The input path may be supplied as the first command-line argument; when
 * absent, the original hard-coded path is used for backward compatibility.
 */
object WorldCount {

  def main(args: Array[String]): Unit = {
    // Local (single-JVM) master; appName shows up in the Spark UI/logs.
    val conf = new SparkConf().setMaster("local").setAppName("WC")
    val sc = new SparkContext(conf)
    try {
      val inputPath =
        if (args.nonEmpty) args(0)
        else "F:\\MyIDEA\\MyScala\\sxt\\day0304_3\\src\\main\\resources\\data\\hello.txt"

      val lines: RDD[String] = sc.textFile(inputPath)
      // Renamed lambda parameter: the original shadowed the outer `lines` val.
      val words: RDD[String] = lines.flatMap(line => line.split(" "))
      val pairs: RDD[(String, Int)] = words.map(word => (word, 1))
      val counts: RDD[(String, Int)] = pairs.reduceByKey(_ + _)

      // NOTE: foreach(println) prints on the driver only because master is
      // "local"; on a real cluster it would print on the executors instead.
      counts.sortBy(_._2, ascending = false).foreach(println)
      println("-------------------")
      counts.foreach(println)
      println("-------------------")
    } finally {
      // Always release the SparkContext, even if the job throws.
      sc.stop()
    }
  }
}
