package cn.doitedu.day02

/**
 * Single-machine word count implemented with Scala functional collection operations.
 */
object WordCount {

  /**
   * Counts how often each word occurs in the given lines and returns the
   * counts sorted from highest to lowest.
   *
   * @param lines input lines, each containing space-separated words
   * @return list of (word, count) tuples, highest count first; empty input
   *         yields an empty list
   */
  def wordCount(lines: Array[String]): List[(String, Int)] = {
    lines
      .flatMap(_.split(" "))                               // split each line and flatten into words
      .groupBy(identity)                                   // group occurrences by the word itself
      .map { case (word, occs) => (word, occs.length) }    // group size == occurrence count
      .toList
      .sortBy(-_._2)                                       // negate the count to sort descending in one pass
  }

  // Counts word occurrences in a fixed sample and prints them sorted
  // from highest to lowest, first step-by-step, then in condensed form.
  def main(args: Array[String]): Unit = {

    val lines: Array[String] = Array("spark hadoop flink", "spark hive", "hadoop spark spark", "flink spark hive flink flink hive")

    // --- Step-by-step pipeline (educational version) ---

    // 1. Split each line into words and flatten into a single array.
    val words: Array[String] = lines.flatMap(_.split(" "))

    // 2. Pair every word with 1.
    val wordAndOne: Array[(String, Int)] = words.map((_, 1))

    // 3. Group the pairs by word.
    val grouped: Map[String, Array[(String, Int)]] = wordAndOne.groupBy(t => t._1)

    // 4. The size of each group is that word's count.
    val result: Map[String, Int] = grouped.map(t => (t._1, t._2.length))

    // 5. Sort descending by count directly (negated key) instead of
    //    sorting ascending and then reversing — avoids an extra pass.
    val res = result.toArray.sortBy(-_._2)
    println(res.toBuffer)

    // --- Condensed version via the reusable helper ---
    val res3 = wordCount(lines)
    println(res3.toBuffer)
  }

}
