package com.shujia.opt

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

object Demo2FIlterKey {
  /**
   * Word-count demo illustrating a data-skew mitigation: drop the known
   * hot key ("null") before the shuffle so no single reduce task is
   * overloaded by it.
   *
   * Reads comma-separated words from spark/data/words, counts each word,
   * prints the counts, then keeps the application alive so the Spark
   * web UI remains reachable.
   */
  def main(args: Array[String]): Unit = {
    // 1. Create the Spark session (local mode, 4 cores).
    val spark: SparkSession = SparkSession
      .builder()
      .appName("fp")
      .master("local[4]")
      .getOrCreate()

    // Underlying SparkContext, needed for the RDD API.
    val sc: SparkContext = spark.sparkContext

    // Read the input file; each element is one line of text.
    val linesRDD: RDD[String] = sc.textFile("spark/data/words")

    // Split each comma-separated line into individual words.
    val wordsRDD: RDD[String] = linesRDD.flatMap(_.split(","))

    // Filter out the skewed key (the literal string "null") up front,
    // before any shuffle, so the hot key never reaches a reducer.
    val filterRDD: RDD[String] = wordsRDD.filter(word => word != "null")

    // Count words with reduceByKey instead of groupBy + size:
    // reduceByKey pre-aggregates counts map-side before the shuffle,
    // moving one (word, partialCount) pair per key per partition rather
    // than every occurrence of every word. Result is identical.
    val wordCountRDD: RDD[(String, Int)] = filterRDD
      .map(word => (word, 1))
      .reduceByKey(_ + _)

    // Print the counts (runs on the driver in local mode).
    wordCountRDD.foreach(println)

    // Keep the JVM alive so the Spark UI can still be inspected.
    // Sleep instead of busy-spinning: the original empty while(true)
    // loop pinned a CPU core at 100% for no benefit.
    while (true) {
      Thread.sleep(10000)
    }
  }

}
