package com.shujia.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demonstrates the common Spark RDD *actions*: foreach, take, collect,
 * reduce, count, reduceByKeyLocally, lookup, plus notes on the save actions.
 *
 * Runs in local mode against a students text file whose 3rd column is the
 * age and 5th column is the class name (comma-separated).
 */
object Demo18Action {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf()
    conf.setMaster("local")
    conf.setAppName("Demo18Action")

    val sc: SparkContext = new SparkContext(conf)

    // Ensure the SparkContext is always stopped, even if an action throws;
    // otherwise local resources (UI port, scheduler threads) are leaked.
    try {
      val stuRDD: RDD[String] = sc.textFile("spark/data/stu/students.txt")

      // foreach: like map, but returns nothing — it is an action, not a transformation
      stuRDD.foreach(println)

      // take: fetch the first n records into a local Scala Array.
      // The foreach called here is the one provided by Array, not by the RDD.
      stuRDD.take(10).foreach(println)

      // collect: pull the entire RDD back to the driver as a Scala Array.
      // Only safe when the data fits in driver memory.
      stuRDD.collect().foreach(println)
      stuRDD.map(line => (line.split(",")(4), 1)).collect().toMap.foreach(println)

      // reduce: aggregate the whole dataset into a single value
      val sumAge: Int = stuRDD.map(line => line.split(",")(2).toInt).reduce(_ + _)
      // Pedagogical equivalent of stuRDD.count() — shown here to illustrate reduce;
      // in real code prefer count(), which avoids the intermediate map.
      val cnt: Int = stuRDD.map(line => 1).reduce(_ + _)
      println(sumAge / cnt.toDouble) // average age; toDouble avoids integer division

      // count: number of records in the RDD
      println(stuRDD.count())

      // reduceByKeyLocally: equivalent to reduceByKey + collect + toMap,
      // returning the result directly as a local Scala Map on the driver
      val clazzCntMap: collection.Map[String, Int] =
        stuRDD.map(line => (line.split(",")(4), 1)).reduceByKeyLocally(_ + _)
      clazzCntMap.foreach(println)
      val clazzCntMap02: Map[String, Int] =
        stuRDD.map(line => (line.split(",")(4), 1)).reduceByKey(_ + _).collect().toMap
      clazzCntMap02.foreach(println)

      // lookup: on a KV RDD, return all values for the given key
      // as a local Scala collection (Seq)
      println(stuRDD.map(line => (line.split(",")(4), 1)).lookup("文科一班"))

      // File-saving actions (not demonstrated): saveAsTextFile, saveAsObjectFile
    } finally {
      sc.stop()
    }
  }

}
