package com.shujia.spark.opt

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

object Demo8FilterKey {

  /**
   * Demo: mitigating data skew by filtering out the few hot keys
   * that cause it, before the shuffle.
   *
   * Reads comma-separated words from `data/word`, counts occurrences
   * via a deliberately shuffle-heavy `groupBy`, prints the counts, then
   * keeps the driver alive so the Spark web UI stays reachable.
   */
  def main(args: Array[String]): Unit = {

    // Local single-JVM session — demo only, not a cluster deployment.
    val spark: SparkSession = SparkSession
      .builder()
      .master("local")
      .appName("filterkey")
      .getOrCreate()

    val sc: SparkContext = spark.sparkContext

    val linesRDD: RDD[String] = sc.textFile("data/word")

    // One record per comma-separated token.
    val wordsRDD: RDD[String] = linesRDD.flatMap(_.split(","))

    /**
     * Skew mitigation: drop the small number of keys responsible for the
     * skew (here the literal string "null") before any shuffle happens.
     * Uncomment and feed `filterRDD` into the pipeline to apply it.
     */
    //val filterRDD: RDD[String] = wordsRDD.filter((word: String) => word != "null")

    val kvRDD: RDD[(String, Int)] = wordsRDD.map((_, 1))

    // NOTE: groupBy shuffles every (word, 1) pair across the network; a
    // skewed key funnels most records into one of the 5 partitions. In
    // production prefer reduceByKey, which combines map-side first —
    // kept as groupBy here because the skew is the point of the demo.
    val groupByRDD: RDD[(String, Iterable[(String, Int)])] =
      kvRDD.groupBy(kv => kv._1, numPartitions = 5)

    // The count per word is simply the size of its grouped iterable.
    val wordCountRDD: RDD[(String, Int)] = groupByRDD.map {
      case (word, pairs) => (word, pairs.size)
    }

    wordCountRDD.foreach(println)

    // Keep the driver process alive so the Spark UI can be inspected.
    // The original `while (true) {}` busy-spins and pegs a CPU core;
    // sleeping in the loop preserves the behavior without burning cycles.
    while (true) Thread.sleep(10000)
  }

}
