package com.shujia.opt

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

import scala.util.Random

object Demo3DoubleReduce {

  /**
   * Demonstrates two-stage ("salted") aggregation to mitigate data skew in a
   * word count:
   *
   *  1. Prefix each word with a random salt `0..9` so one hot key is spread
   *     over up to ten distinct shuffle keys, then aggregate per salted key.
   *  2. Strip the salt and aggregate the partial counts to the final result.
   *
   * Reads comma-separated words from `spark/data/words` and prints
   * `(word, count)` pairs to stdout.
   */
  def main(args: Array[String]): Unit = {
    // 1. Create the Spark session (local mode, 4 threads).
    val spark: SparkSession = SparkSession
      .builder()
      .appName("fp")
      .master("local[4]")
      .getOrCreate()

    // Underlying SparkContext for the RDD API.
    val sc: SparkContext = spark.sparkContext

    // Read the input file; each line holds comma-separated words.
    val linesRDD: RDD[String] = sc.textFile("spark/data/words")

    // Split each line into individual words.
    val wordsRDD: RDD[String] = linesRDD.flatMap(_.split(","))

    // Stage 1: salt every word with a random 0-9 prefix so a single hot key
    // is distributed across up to ten shuffle partitions.
    val saltedRDD: RDD[String] = wordsRDD.map { word =>
      val salt: Int = Random.nextInt(10)
      s"${salt}_${word}"
    }

    // Partial count per salted key. reduceByKey combines map-side before the
    // shuffle, unlike groupBy which ships every single record over the wire.
    val partialCounts: RDD[(String, Int)] = saltedRDD
      .map((_, 1))
      .reduceByKey(_ + _)

    // Stage 2: strip the salt prefix and sum the partial counts.
    // split with limit 2 removes only the leading "<salt>_" — a word that
    // itself contains '_' is preserved intact (split("_").last would not).
    val wordCountRDD: RDD[(String, Int)] = partialCounts
      .map { case (salted, count) => (salted.split("_", 2)(1), count) }
      .reduceByKey(_ + _)

    wordCountRDD.foreach(println)

    // Keep the driver alive so the Spark UI (http://localhost:4040) can be
    // inspected; sleep instead of busy-spinning a CPU core.
    while (true) {
      Thread.sleep(1000)
    }
  }
}
