package HadoopWithSpark.homework

import java.io.PrintWriter

import scala.collection.mutable.ArrayBuffer
import scala.io.Source

/**
 * @Author TheKernel
 * @Date 2019/9/24 5:50 PM
 * @Version 1.0
 * @Description First Scala WordCount program: reads a text file, counts word
 *              frequencies, and writes the results sorted by descending count.
 */
object WordCount {

  /**
   * Entry point. Reads the input file, counts whitespace-separated words, and
   * writes one "word -> count" line per word (highest count first) to the
   * output file.
   *
   * @param args optional overrides: args(0) = input path, args(1) = output path;
   *             defaults are used when not supplied (backward compatible).
   */
  def main(args: Array[String]): Unit = {
    // Allow paths on the command line; fall back to the original defaults.
    val inputPath = if (args.length > 0) args(0) else "data/input/WordCount.txt"
    val outputPath = if (args.length > 1) args(1) else "data/output/WordCountResult.txt"

    val inputFile = Source.fromFile(inputPath)
    try {
      // Split on runs of whitespace. "\\s+" (rather than "\\s") avoids the
      // empty-string tokens that consecutive spaces/blank lines would produce,
      // which the original code counted as a word.
      val words = inputFile
        .getLines()
        .flatMap(_.split("\\s+"))
        .filter(_.nonEmpty)
        .toSeq

      // Count occurrences of each word, then sort by descending frequency.
      // No type-erased pattern match and no reassignable `var` needed.
      val wordCounts = words
        .groupBy(identity)
        .map { case (word, occurrences) => (word, occurrences.size) }
        .toList
        .sortBy(-_._2)

      val out = new PrintWriter(outputPath)
      try {
        wordCounts.foreach { case (word, count) => out.println(s"$word -> $count") }
      } finally {
        // Ensure the writer is flushed and closed even if a write fails.
        out.close()
      }
      println("WordCount Success !!!")
    } finally {
      // Close the input source even when parsing or writing throws.
      inputFile.close()
    }
  }

}
