package com.shujia.core.actions

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

/**
 * `collect` is an action operator: it materializes an RDD's elements into a
 * local Scala collection (an `Array`) on the driver, so the result can be
 * processed with ordinary Scala code.
 *
 * Example job: count the number of students in each class, then post-process
 * the counts locally.
 */
object CollectOpt {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setMaster("local")
    // Fixed: the app name was copy-pasted from the union example ("union合并").
    conf.setAppName("collect")

    val sc = new SparkContext(conf)

    try {
      // Parse each CSV line into a 5-field tuple. Using RDD.collect with a
      // partial function silently skips malformed lines, whereas the previous
      // `map { case Array(...) }` threw a MatchError on any row that did not
      // split into exactly 5 fields.
      val studentTupleRDD: RDD[(String, String, Int, String, String)] =
        sc.textFile("spark/data/students.txt")
          .map(_.split(",")) // line -> [id, name, age, gender, clazz]
          .collect {
            case Array(id, name, age, gender, clazz) =>
              (id, name, age.toInt, gender, clazz)
          }

      // Emit (class, 1) pairs and sum them per key -> student count per class.
      val resRDD: RDD[(String, Int)] = studentTupleRDD.map {
        case (_, _, _, _, clazz) => (clazz, 1)
      }.reduceByKey(_ + _)

      // collect(): RDD -> local Scala Array on the driver.
      val arr1: Array[(String, Int)] = resRDD.collect()
      // Plain Scala transformation on the driver-side array.
      val arr2: Array[(String, Int)] = arr1.map { case (clazz, cnt) => ("数加:" + clazz, cnt) }
      // Print the result so the example produces observable output
      // (the original computed arr2 and discarded it).
      arr2.foreach(println)
    } finally {
      // Release cluster resources; the original leaked the SparkContext.
      sc.stop()
    }
  }
}
