package study.core.wordcount

import org.apache.hadoop.io.{IntWritable, Text}
import org.apache.hadoop.mapred.SequenceFileInputFormat
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Spark实现WordCount
 * @author zh
 * @date 2021/5/12 10:15
 */
object Spark_WordCount {
  /**
   * Entry point: runs a word count over `wordcount/wordcount.txt` found on the
   * classpath, using a local (in-process) Spark master.
   *
   * Pipeline: read lines -> split on spaces into words -> group identical words
   * -> map each group to (word, count). Each intermediate RDD is collected and
   * printed for study purposes, so this is only suitable for small inputs.
   *
   * @param args command-line arguments (unused)
   */
  def main(args: Array[String]): Unit = {
    // Spark configuration; "local" runs driver and executor in this JVM.
    val sparkConf = new SparkConf().setMaster("local").setAppName("WordCount")
    // Open the Spark connection.
    val sc = new SparkContext(sparkConf)
    try {
      // Resolve the input file from the classpath. Fail fast with a clear
      // message instead of a NullPointerException when the resource is absent.
      val resource = ClassLoader.getSystemResource("wordcount/wordcount.txt")
      require(resource != null, "classpath resource wordcount/wordcount.txt not found")
      // Read the file as an RDD of lines.
      val lines: RDD[String] = sc.textFile(resource.getPath)
      println("======lines=======")
      lines.collect().foreach(println)

      // Split every line into individual words.
      val words: RDD[String] = lines.flatMap(_.split(" "))
      println("======words=======")
      words.collect().foreach(println)

      // Group identical words together: word -> all its occurrences.
      val group: RDD[(String, Iterable[String])] = words.groupBy(identity)
      println("======group=======")
      group.collect().foreach(println)

      // Convert each group into (word, occurrence count).
      // NOTE: in production code prefer words.map((_, 1)).reduceByKey(_ + _),
      // which combines counts before the shuffle instead of shipping whole
      // groups; groupBy is kept here to illustrate each step separately.
      val wordCount: RDD[(String, Int)] = group.map {
        case (word, occurrences) => (word, occurrences.size)
      }
      wordCount.collect().foreach(println)
    } finally {
      // Always release the Spark connection, even if a stage above failed.
      sc.stop()
    }
  }
}
