package com.shujia.spark.core

import com.shujia.spark.util.HdfsUtil
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demo job: counts the number of students per class and writes the result to HDFS.
 *
 * Intended to be packaged and submitted to a YARN cluster (see the submit
 * command at the bottom of `main`).
 */
object Demo18SparkYarnSubmit {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()

    conf.setAppName("submit")

    /**
     * When submitting to the cluster, the master must NOT be hard-coded here;
     * it is supplied by spark-submit. Keep this line commented out.
     */
    // conf.setMaster("local")

    val sc = new SparkContext(conf)

    try {
      // Read the student file from HDFS; one student record per line.
      val linesRDD: RDD[String] = sc.textFile("/data/student")

      // Extract the class name (5th comma-separated field) and pair it with a count of 1.
      val kvRDD: RDD[(String, Int)] = linesRDD.map((line: String) => {
        val clazz: String = line.split(",")(4)
        (clazz, 1)
      })

      // Sum the counts per class to get the number of students in each class.
      val clazzNumRDD: RDD[(String, Int)] = kvRDD.reduceByKey(_ + _)

      // Format each (class, count) pair as a tab-separated output line.
      val resultRDD: RDD[String] = clazzNumRDD.map {
        case (clazz: String, num: Int) =>
          s"$clazz\t$num"
      }

      /**
       * Delete the output path if it already exists on HDFS —
       * saveAsTextFile fails when the target directory is present.
       */
      HdfsUtil.delete("/data/clazz_num")

      // Save the result to HDFS.
      resultRDD.saveAsTextFile("/data/clazz_num")
    } finally {
      // Always stop the SparkContext so the YARN application releases its resources.
      sc.stop()
    }

    /**
     * Package the project and upload the jar to the server, then submit with:
     *
     * spark-submit --master yarn --deploy-mode client --class com.shujia.spark.core.Demo18SparkYarnSubmit spark-1.0.jar
     *
     * (the legacy `--master yarn-client` form is deprecated since Spark 2.x)
     */

  }

}
