package club.beimeng.spark.core.wordcount

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Spark02_Word_Count {

  /**
   * Reads text files from the "datas" directory, splits each line into
   * words, counts occurrences of each word, and prints the results.
   *
   * @param sc the active [[SparkContext]] used to create the RDDs
   */
  def wordCount(sc: SparkContext): Unit = {
    // 1. Read the input files; each RDD element is one line, e.g. "hello world".
    val lines: RDD[String] = sc.textFile("datas")

    // 2. Split every line on whitespace, ',' or '.' and flatten into words.
    //    Consecutive delimiters (e.g. ", ") make split emit empty strings,
    //    which must not be counted as words — hence the nonEmpty filter.
    val words: RDD[String] = lines
      .flatMap(_.split("\\s|,|\\."))
      .filter(_.nonEmpty)

    // 3. Pair each word with a count of 1: "hello" => ("hello", 1).
    val wordToOne: RDD[(String, Int)] = words.map(word => (word, 1))

    // 4. Sum counts per word. reduceByKey combines values on each partition
    //    before shuffling (map-side combine); the previous groupBy + reduce
    //    shuffled every (word, 1) pair and materialized whole groups in memory.
    val wordToCount: RDD[(String, Int)] = wordToOne.reduceByKey(_ + _)

    // 5. Collect the results to the driver and print them.
    //    NOTE(review): collect() pulls the full result into driver memory —
    //    fine for a demo, unsuitable for very large vocabularies.
    val array: Array[(String, Int)] = wordToCount.collect()
    array.foreach(println)
  }

  /**
   * Application entry point: establishes the Spark connection, runs the
   * word-count job, and always releases the connection afterwards.
   */
  def main(args: Array[String]): Unit = {
    // Connect to the Spark framework, running locally on all available cores.
    val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("WordCount")
    val sc = new SparkContext(sparkConf)
    try {
      // Execute the business logic.
      wordCount(sc)
    } finally {
      // Close the connection even if the job throws, so the context
      // (and its local executor threads) are not leaked.
      sc.stop()
    }
  }
}
