package cn.doitedu.day03

object WordCount2 {

  /**
   * Classic word-count demo over an in-memory array of lines.
   *
   * Pipeline: split lines into words -> group identical words ->
   * count each group -> sort descending by count -> print.
   */
  def main(args: Array[String]): Unit = {

    // Input data: each element is one line of space-separated words.
    val lines: Array[String] = Array("spark hadoop flink", "spark hive", "hadoop spark spark", "flink spark hive flink flink hive")

    // Split every line on spaces and flatten into a single array of words.
    val words: Array[String] = lines.flatMap(_.split(" "))

    // Group identical words together: word -> array of its occurrences.
    val grouped: Map[String, Array[String]] = words.groupBy(identity)

    // A word's count is the size of its occurrence group.
    val wordAndCount: Map[String, Int] = grouped.map(t => (t._1, t._2.length))

    // Sort descending by count. Negating the key keeps the sort stable;
    // sortBy(_._2).reverse would flip the relative order of equal counts
    // and costs an extra full pass over the list.
    val res: List[(String, Int)] = wordAndCount.toList.sortBy(-_._2)

    // Print the immutable result directly — no need to copy it into a
    // mutable buffer just for display.
    println(res)
  }

}
