package com.shujia.spark.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demo job for submitting to a cluster with spark-submit:
 * reads student CSV lines from HDFS, counts students per class,
 * and writes "clazz\tcount" lines back to HDFS.
 */
object Demo18SparkSubmit {
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()

    // When submitting to a cluster, do NOT hard-code the master in code;
    // it is supplied at submit time via --master.
    //conf.setMaster("local")

    conf.setAppName("submit")

    val sc = new SparkContext(conf)

    try {
      // 1. Read the student data from HDFS (one CSV record per line).
      val studentsRDD: RDD[String] = sc.textFile("/data/student")

      // 2. Count students per class.
      //    Assumes the class name is the last comma-separated field — confirm against the data.
      val clazzNumRDD: RDD[(String, Int)] = studentsRDD
        .map(line => (line.split(",").last, 1))
        .reduceByKey(_ + _)

      // 3. Format each (class, count) pair as a tab-separated line.
      val resultRDD: RDD[String] = clazzNumRDD.map {
        case (clazz, num) => s"$clazz\t$num"
      }

      // 4. Save the result to HDFS.
      //    NOTE: saveAsTextFile fails if the output directory already exists;
      //    remove /data/clazz_num before re-running.
      resultRDD.saveAsTextFile("/data/clazz_num")
    } finally {
      // Always release cluster resources, even if the job fails.
      sc.stop()
    }

    /**
     * Submit the job (yarn-client/yarn-cluster master strings were removed in Spark 2.x;
     * use --master yarn with an explicit deploy mode instead):
     *
     * spark-submit --master yarn --deploy-mode client  --class com.shujia.spark.core.Demo18SparkSubmit spark-1.0.jar
     *
     * spark-submit --master yarn --deploy-mode cluster --class com.shujia.spark.core.Demo18SparkSubmit spark-1.0.jar
     *
     * Inspect the result:
     * hdfs dfs -ls /data/clazz_num
     */
  }

}
