package com.ustcinfo.study.scala.r2

import org.apache.spark.{SparkConf, SparkContext}

/*
 * Word-count example in Scala (Spark)
 * */

/** Spark RDD word-count example.
  *
  * Reads a text file, splits each line on spaces, counts occurrences of
  * each word, and prints the ten most frequent words (descending by count).
  *
  * NOTE: this must be a plain `object`, not a `package object` — a package
  * object compiles to `WordCountSpark.package$`, so its `main` cannot be
  * launched as an application entry point (`--class WordCountSpark` would
  * fail to resolve).
  */
object WordCountSpark {

  /** Entry point.
    *
    * @param args optional: args(0) is the input file path; falls back to the
    *             original hard-coded resource path when absent (backward
    *             compatible with the previous no-argument behavior).
    */
  def main(args: Array[String]): Unit = {
    // Spark configuration: local single-threaded mode for this demo.
    val sparkConf = new SparkConf().setMaster("local").setAppName("test")
    val sc = new SparkContext(sparkConf)
    try {
      // Allow the input path to be supplied on the command line.
      val inputPath = args.headOption.getOrElse(
        "C:\\Users\\Barlow\\IdeaProjects\\bd-scala\\src\\main\\resources\\programmingGuide")
      // Load the file as an RDD of lines.
      val txtRDD = sc.textFile(inputPath)

      txtRDD
        .filter(_.trim.nonEmpty)                     // drop blank lines
        .flatMap(_.split(" "))                       // one word per element (single flatMap replaces map+flatMap)
        .map(word => (word, 1))                      // pair each word with an initial count of 1
        .reduceByKey(_ + _)                          // sum the counts for identical words
        .map { case (word, count) => (count, word) } // swap so the count becomes the sort key
        .sortByKey(ascending = false)                // descending by frequency
        .take(10)                                    // keep only the top ten
        .foreach(println)                            // print (count, word) pairs
    } finally {
      // Always release Spark resources, even if the job throws.
      sc.stop()
    }
  }

}
