package com.doit.day04

import scala.io.Source

object WordCount {

  /** Default input path (preserved from the original hard-coded location). */
  private val DefaultPath = "D:\\develop\\ideaWorkSpace\\myself\\Teach\\data\\word.txt"

  /**
   * Counts occurrences of each comma-separated word across the given lines,
   * MapReduce-style: split -> map to (word, 1) -> group by word -> sum counts.
   *
   * @param lines lines of text, each containing comma-separated words
   * @return map from word to its total occurrence count
   */
  def countWords(lines: Iterator[String]): Map[String, Int] = {
    val words: Array[String] = lines.flatMap(_.split(",")).toArray
    val wordAndOne: Array[(String, Int)] = words.map(word => (word, 1))
    wordAndOne
      .groupBy { case (word, _) => word }
      .map { case (word, pairs) => (word, pairs.map(_._2).sum) }
  }

  /**
   * Reads the word file and prints each (word, count) pair.
   * The input path may be supplied as the first program argument;
   * otherwise the original default path is used (backward compatible).
   */
  def main(args: Array[String]): Unit = {
    val path = args.headOption.getOrElse(DefaultPath)
    val source = Source.fromFile(path)
    try {
      countWords(source.getLines()).foreach(println)
    } finally {
      source.close() // the original leaked this file handle; always release it
    }
  }
}
