package com.doit.day01.op

import scala.io.Source

/**
 * Word-count demo:
 * 1. Read the contents of a file, obtaining each line.
 * 2. Split each line into individual words.
 * 3. Group the words by value.
 * 4. Count the occurrences of each word.
 */
object WordCountDemo1 {

  /**
   * Pure word-count logic: split each line on commas, flatten the result
   * into individual words, then count occurrences of each word.
   *
   * Replaces the original groupBy + map-to-ones + sum pipeline, which built
   * an intermediate `Map[String, List[Int]]` only to sum lists of 1s;
   * `occurrences.size` yields the same count directly.
   *
   * @param lines raw input lines, each a comma-separated list of words
   * @return map from word to its number of occurrences
   */
  def countWords(lines: List[String]): Map[String, Int] =
    lines
      .flatMap(_.split(","))   // lines -> individual words
      .groupBy(identity)       // word -> List of all its occurrences
      .map { case (word, occurrences) => (word, occurrences.size) }

  def main(args: Array[String]): Unit = {
    // Read every line of the input file, closing the handle even on failure.
    // (The original never closed the Source — a file-handle leak.)
    val source = Source.fromFile("D:\\develop\\ideaWorkSpace\\myself\\Teach\\scalaDemo\\data\\WordCount")
    val lines: List[String] =
      try source.getLines().toList
      finally source.close()

    // e.g. Map(hello -> 3, hi -> 2, hive -> 4, hadoop -> 1, hbase -> 1)
    println(countWords(lines))
  }
}
