package com.wfg.bigdata.spark.core.wc1

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext, rdd}

object Spark01_WordCount {

  /** Word-count driver: reads a text input, splits each line into words,
    * counts occurrences per word, and prints the (word, count) pairs.
    *
    * @param args optional — args(0) overrides the input path (defaults to "datas",
    *             preserving the original behavior when no argument is given)
    */
  def main(args: Array[String]): Unit = {

    // Establish the connection to the Spark framework (single local thread).
    val spc = new SparkConf().setMaster("local").setAppName("workcount")
    val sc = new SparkContext(spc)

    // 1. Read the input; each RDD element is one line of text.
    //    The path is parameterized via args so the job is reusable.
    val inputPath = if (args.nonEmpty) args(0) else "datas"
    val lines: RDD[String] = sc.textFile(inputPath)

    // Echo the raw lines once for debugging.
    // (The original printed every line twice via two identical foreach calls.)
    lines.foreach(println)

    // 2. Split each line into individual words
    //    (flatMap = flatten: break the whole into its parts).
    val words: RDD[String] = lines.flatMap(_.split(" "))

    // 3+4. Count occurrences per word.
    //    reduceByKey pre-aggregates on each partition before the shuffle,
    //    unlike the original groupBy, which shuffled every single occurrence
    //    and held whole Iterable[String] groups in memory just to take .size.
    val res: RDD[(String, Int)] = words.map(word => (word, 1)).reduceByKey(_ + _)

    // 5. Collect the results to the driver and print them to the console.
    val array: Array[(String, Int)] = res.collect()
    array.foreach(println)

    sc.stop()
  }
}
