package com.shujia.spark.opt

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

object Demo7RePartitionCar {

  /**
   * Demo: reducing the number of partitions after a shuffle.
   *
   * Counts cars per district from a text file, forcing the shuffled
   * result down to a single partition so the job writes one output
   * file instead of many small ones.
   */
  def main(args: Array[String]): Unit = {
    // Create the Spark SQL environment (the unified entry point in newer Spark versions).
    val spark: SparkSession = SparkSession
      .builder()
      .master("local[8]")
      .appName("cache")
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    // Obtain the SparkContext for RDD-level operations.
    val sc: SparkContext = spark.sparkContext

    val carRDD: RDD[String] = sc.textFile("kafka-python/cars.log")
    println(s"carRDD分区数：${carRDD.getNumPartitions}")

    // Parse each record: the district is the last comma-separated field;
    // emit (district, 1) pairs for counting.
    val districtRDD: RDD[(String, Int)] = carRDD.map(car => {
      val district: String = car.split(",").last
      (district, 1)
    })

    println(s"districtRDD分区数：${districtRDD.getNumPartitions}")

    /**
     * When the data volume shrinks after a shuffle, reduce the partition count:
     * 1. fewer small output files are produced
     * 2. execution efficiency improves
     */
    // Passing numPartitions = 1 to reduceByKey already yields a single
    // partition, so the previously chained .coalesce(1) was a no-op and
    // has been removed.
    val countRDD: RDD[(String, Int)] = districtRDD
      .reduceByKey((x, y) => x + y, 1)

    println(s"countRDD分区数：${countRDD.getNumPartitions}")

    // Save the result.
    // NOTE(review): saveAsTextFile throws if "data/district_flow" already
    // exists — delete the directory between runs.
    countRDD.saveAsTextFile("data/district_flow")

    // Keep the driver (and the Spark web UI) alive for inspection.
    // Sleep instead of busy-spinning so we don't peg a CPU core.
    while (true) {
      Thread.sleep(1000)
    }
  }
}
