package com.shujia.core

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

object Demo14collect {
  /**
   * Demonstrates the `collect()` action: materializes an RDD's elements
   * on the driver as a local Scala `Array`, after which all further
   * processing (here, `foreach(println)`) is plain driver-side Scala.
   */
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf()
      .setMaster("local")
      // Was "MapValues算子演示" — a copy-paste error; this demo shows collect().
      .setAppName("Collect算子演示")

    val sc: SparkContext = new SparkContext(conf)

    val linesRDD: RDD[String] = sc.textFile("spark/data/students.txt")

    // Split each CSV line into its raw fields.
    val rdd1: RDD[Array[String]] = linesRDD.map((e: String) => e.split(","))

    // Parse rows into Student records. RDD.collect(PartialFunction) keeps only
    // rows the pattern matches, so a malformed line (not exactly 5 fields)
    // is skipped instead of aborting the job with a MatchError.
    val rdd2: RDD[Student] = rdd1.collect {
      case Array(id: String, name: String, age: String, gender: String, clazz: String) =>
        Student(id.toInt, name, age.toInt, gender, clazz)
    }

    // collect() is an action: it triggers the job and copies the results
    // back to the driver as a suitable local Scala data structure.
    val stuArray: Array[Student] = rdd2.collect()
    // This foreach is Scala's Array.foreach on the driver — it does not
    // launch another Spark job.
    stuArray.foreach(println)

    // Keep the JVM alive so the Spark web UI (http://localhost:4040) stays
    // inspectable; sleep instead of busy-spinning so we don't peg a CPU core.
    while (true) {
      Thread.sleep(10000)
    }

  }
}

case class Student(id: Int, name: String, age: Int, gender: String, clazz: String)
