package cn.hnu.spark

import org.apache.commons.lang3.StringUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Enhanced word-count demo: counts word frequencies in a text file,
 * sorts them in descending order of occurrence, and also reports how
 * many special-character tokens were filtered out.
 */
object RddCase06 {
  def main(args: Array[String]): Unit = {
    // Tokens treated as "special characters" and excluded from the word count.
    val specialChars: List[String] = List(",", ".", "!", "#", "$", "%")

    // Build the SparkContext (local mode, 2 threads).
    val conf: SparkConf = new SparkConf().setAppName("Rdd-demo").setMaster("local[2]")
    val sc = new SparkContext(conf)
    sc.setLogLevel("WARN")

    try {
      val lines: RDD[String] = sc.textFile("data/words1.txt")
      // Drop blank/whitespace-only lines.
      // NOTE: the original used isNoneBlank (the varargs variant); for a single
      // argument the correct method is isNotBlank.
      val newLines: RDD[String] = lines.filter(StringUtils.isNotBlank(_))
      // Split each line on runs of whitespace into individual tokens.
      val wordAndCharRdd: RDD[String] = newLines.flatMap(_.split("\\s+"))

      // Total token count, including special characters.
      val n1: Long = wordAndCharRdd.count()
      // Keep only real words (exclude the special-character tokens).
      val wordRdd: RDD[String] = wordAndCharRdd.filter(!specialChars.contains(_))
      // Token count after the special characters have been removed.
      val n2: Long = wordRdd.count()

      // Classic word count: (word, 1) pairs reduced by key.
      val result: RDD[(String, Int)] = wordRdd
        .map((_, 1)).reduceByKey(_ + _)
      // Sort by frequency, descending.
      val sortedResult: RDD[(String, Int)] = result.sortBy(_._2, false)
      // Print results and the number of filtered special-character tokens.
      println(sortedResult.collect().toBuffer)
      println("特殊字符出现次数：" + (n1 - n2))
    } finally {
      // Always release the SparkContext, even if the job fails.
      sc.stop()
    }
  }
}
