package com.example.spark.core

import com.example.util.SparkUtil
import org.apache.commons.lang3.StringUtils
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD

/**
 * @title: WordCount
 * @projectName bigdata
 * @description: Basic Spark RDD word-count example: reads a text file, counts word
 *               frequencies, prints the top 10, and saves the sorted result to disk.
 * @author leali
 * @date 2022/5/15 7:20
 */
object WordCount {
  // Silence verbose framework logging so the job's own output is readable.
  Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
  Logger.getLogger("org.apache.hadoop").setLevel(Level.WARN)

  /**
   * Entry point: word count over a local text file.
   *
   * Reads `src/data/input/word.txt`, counts word frequencies, sorts by count
   * descending, prints the top 10 on the driver, and writes the full sorted
   * result to `src/data/output/sc_result1` (1 file) and `sc_result2` (2 files).
   *
   * @param args unused; input/output paths are hard-coded in this demo
   */
  def main(args: Array[String]): Unit = {
    val sparkContext: SparkContext = SparkUtil.initSimpleSparkContent("WordCount")

    /**
     * Partition-aware alternatives worth remembering for heavier per-element setup:
     * map -> mapPartitions, foreach -> foreachPartition.
     */
    // Pipeline: non-blank lines -> words -> (word, 1) -> summed counts, then
    // sort by count descending (swap to key on count, sortByKey, swap back).
    val counts: RDD[(String, Int)] = sparkContext.textFile("src/data/input/word.txt")
      // isNotBlank is the correct single-argument check; isNoneBlank is the
      // varargs "none of these are blank" variant and was misleading here.
      .filter(line => StringUtils.isNotBlank(line))
      .flatMap(_.split(" "))
      .map((_, 1))
      .reduceByKey(_ + _)
      .map(_.swap)
      .sortByKey(ascending = false)
      .map(_.swap)

    // take(10) fetches only the first 10 elements (sort order is preserved),
    // instead of collect().take(10) which pulled the ENTIRE RDD to the driver.
    val tuples: Array[(String, Int)] = counts.take(10)

    // Way 1: print each tuple directly.
    SparkUtil.printLog("直接输出")
    tuples.foreach(println)

    // Way 2: render the local array as a single collection string.
    SparkUtil.printLog("收集为本地集合再输出")
    println(tuples.toBuffer)

    SparkUtil.printLog("输出到指定path(可以是文件/夹)")

    /**
     * saveAsTextFile writes one part-file per partition, so repartition(n)
     * controls how many output files each directory contains.
     */
    counts.repartition(1).saveAsTextFile("src/data/output/sc_result1")
    counts.repartition(2).saveAsTextFile("src/data/output/sc_result2")

    // Keep the application alive for a minute so the Spark Web UI can be inspected.
    Thread.sleep(1000 * 60)

    // Release cluster resources.
    sparkContext.stop()
  }
}
