package com.shujia.core

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Spark job meant to be submitted to YARN; it reads from and writes to HDFS.
 *
 * Counts the number of students per class from a CSV file on HDFS and writes
 * the result back to HDFS as tab-separated "class<TAB>count" lines.
 */
object Demo18SparkYarnSubmit {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    /**
     * When submitting to YARN the master is supplied by spark-submit,
     * so it must NOT be hard-coded here.
     */
    //    conf.setMaster("local")
    conf.setAppName("yarn submit")

    val context = new SparkContext(conf)

    try {
      // Read the input data from HDFS.
      val linesRDD: RDD[String] = context.textFile("/bigdata29/data/students.csv")
      println("=" * 100)
      println(s"分区数为:${linesRDD.getNumPartitions}")
      println("=" * 100)

      // Map each line to (class, 1); the class name is the 5th CSV column.
      val classKVRDD: RDD[(String, Int)] = linesRDD.map((line: String) => {
        val clazz: String = line.split(",")(4)
        (clazz, 1)
      })

      // Count the number of students per class.
      val clazzNumRDD: RDD[(String, Int)] = classKVRDD.reduceByKey(_ + _)

      // Format each record as "class<TAB>count" for the output file.
      val resRDD: RDD[String] = clazzNumRDD.map((kv: (String, Int)) => s"${kv._1}\t${kv._2}")

      // Delete the output path if it already exists; otherwise
      // saveAsTextFile would fail with FileAlreadyExistsException.
      // Reuse the SparkContext's Hadoop configuration so the delete targets
      // the same filesystem the job itself writes to.
      val outputPath = new Path("/bigdata29/sparkout1")
      val fileSystem: FileSystem = FileSystem.get(context.hadoopConfiguration)
      if (fileSystem.exists(outputPath)) {
        // true = recursive delete (the path is a directory of part files)
        fileSystem.delete(outputPath, true)
      }

      // Save the result RDD to a directory of text files on HDFS.
      resRDD.saveAsTextFile(outputPath.toString)
    } finally {
      // Always stop the context so YARN resources are released,
      // even when the job fails partway through.
      context.stop()
    }

    /**
     * spark-submit --class com.shujia.core.Demo18SparkYarnSubmit --master yarn --deploy-mode client  spark-1.0.jar
     */
  }
}
