package com.shujia.spark.opt

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

object Demo8File {

  /**
   * Demonstrates the small-files optimization: read a directory of many small
   * part files, merge them with `coalesce`, run a word count, and write the
   * result. Keeps the JVM alive afterwards so the Spark web UI can be inspected.
   */
  def main(args: Array[String]): Unit = {
    // Create the Spark SQL environment (the unified entry point in newer Spark versions).
    val spark: SparkSession = SparkSession
      .builder()
      .master("local[8]")
      .appName("cache")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    // Obtain the underlying SparkContext for RDD operations.
    val sc: SparkContext = spark.sparkContext

    // One-time step used to generate the small files (kept for reference):
    /*    sc.textFile("data/words")
          .repartition(100)
          .saveAsTextFile("data/word1")*/

    val linesRDD: RDD[String] = sc
      .textFile("data/word1")
      // Merge the small files: coalesce narrows to 10 partitions without a shuffle.
      .coalesce(10)

    // Classic word count over comma-separated lines.
    val wordsRDD: RDD[String] = linesRDD.flatMap(_.split(","))
    val kvRDD: RDD[(String, Int)] = wordsRDD.map((_, 1))

    val countRDD: RDD[(String, Int)] = kvRDD.reduceByKey((x, y) => x + y)
    countRDD.saveAsTextFile("data/word_count")

    // Keep the application alive so the Spark web UI remains available.
    // Fix: the original `while (true) {}` busy-waited and pegged a CPU core;
    // sleeping yields the thread while still holding the JVM (and UI) open.
    while (true) {
      Thread.sleep(10000)
    }
  }

}
