package com.shujia.core

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.{SparkConf, SparkContext}

object WordCount4 {

  /**
   * Word-count Spark job intended for cluster submission via spark-submit
   * (see the usage comment at the bottom of this file).
   *
   * Arguments:
   *   args(0) — required: output directory for [[org.apache.spark.rdd.RDD#saveAsTextFile]]
   *   args(1) — optional: input file path; defaults to the original
   *             hard-coded "/bigdata33/data/words2.txt"
   *
   * @throws IllegalArgumentException if no output path is supplied
   */
  def main(args: Array[String]): Unit = {
    // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException
    // at args(0) below.
    require(args.nonEmpty, "usage: WordCount4 <outputPath> [inputPath]")

    val outputPath: String = args(0)
    // Input path is now parameterized; the default preserves the original behavior.
    val inputPath: String = if (args.length > 1) args(1) else "/bigdata33/data/words2.txt"

    // No .master() here on purpose: the master is supplied by spark-submit
    // (--master yarn). Add .master("local") only for local debugging.
    val ss: SparkSession = SparkSession.builder()
      .appName("test1")
      .getOrCreate()

    try {
      val sc: SparkContext = ss.sparkContext

      // minPartitions = 1, matching the original job configuration.
      val lineRDD: RDD[String] = sc.textFile(inputPath, 1)

      println("====================================================")
      println(s"====================lineRDD的分区数：${lineRDD.getNumPartitions}===========================")
      println("====================================================")

      // Classic word count: split lines on spaces, pair each word with 1,
      // then sum the counts per word.
      val resRDD: RDD[(String, Int)] = lineRDD.flatMap(_.split(" "))
        .map((_, 1))
        .reduceByKey(_ + _)

      resRDD.saveAsTextFile(outputPath)
    } finally {
      // Always release driver-side resources, even if the job fails.
      ss.stop()
    }
  }
}

/**
 *  yarn-client:
 *    spark-submit --class com.shujia.core.WordCount4 --master yarn --deploy-mode client spark-1.0-SNAPSHOT.jar
 * yarn-cluster:
 *    spark-submit --class com.shujia.core.WordCount4 --master yarn --deploy-mode cluster spark-1.0-SNAPSHOT.jar
 */
