package com.shujia.spark

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Demo21ClazzNum {

  /**
    * Counts students per class from a CSV file on HDFS and writes the
    * result back to HDFS as "<class>\t<count>" lines.
    *
    * @param args optional: args(0) = input path (default "/data/students.txt"),
    *             args(1) = output path (default "/data/clazz_num").
    *             Defaults preserve the original hard-coded behavior.
    */
  def main(args: Array[String]): Unit = {

    // Allow paths to be overridden at submit time; fall back to the
    // original hard-coded locations for backward compatibility.
    val inputPath: String = if (args.length > 0) args(0) else "/data/students.txt"
    val outputPath: String = if (args.length > 1) args(1) else "/data/clazz_num"

    val conf = new SparkConf()
    conf.setAppName("Demo21ClazzNum")

    val sc = new SparkContext(conf)

    try {
      // Read the student file from HDFS; each line is a comma-separated
      // record whose 5th field (index 4) is the class name.
      val studentsRDD: RDD[String] = sc.textFile(inputPath)

      // Map each record to (class, 1) so counts can be aggregated per key.
      val kvRDD: RDD[(String, Int)] = studentsRDD.map { stu =>
        val clazz: String = stu.split(",")(4)
        (clazz, 1)
      }

      // Sum the 1s per class to get the number of students in each class.
      val clazzNumRDD: RDD[(String, Int)] = kvRDD.reduceByKey(_ + _)

      // Format each (class, count) pair as a tab-separated output line.
      val resultRDD: RDD[String] = clazzNumRDD.map {
        case (clazz: String, num: Int) =>
          s"$clazz\t$num"
      }

      // Save the result to HDFS. Fails if the output directory already exists.
      resultRDD.saveAsTextFile(outputPath)
    } finally {
      // Always release cluster resources, even if the job throws.
      sc.stop()
    }

    /**
      * Submitting this job to YARN:
      * 1. Upload the input file to HDFS
      * 2. Package the project and upload the jar to the cluster
      * 3. Submit the job:
      *   spark-submit --class com.shujia.spark.Demo21ClazzNum --master yarn-client spark-1.0.jar
      * 4. Check the result:
      *   hadoop dfs -ls /data
      *
      */

  }

}
