package com.spark.WorCount

import org.apache.spark.{SparkConf, SparkContext}

object ActionOpsScala {

  /** Entry point: demonstrates common Spark RDD action operators on small in-memory data. */
  def main(args: Array[String]): Unit = {
    val sc = getSparkContext
    // reduce: aggregate all elements into a single value
    //reduceOp(sc)
    // collect: bring every RDD element back to the driver
    collectOp(sc)
    // take(n): fetch the first n elements
    takeOp(sc)
    // count: total number of elements
    countOp(sc)
    // saveAsTextFile: persist the RDD as text files
    //saveAsTextFileOp(sc)
    // countByKey: how many times each key occurs
    countByKeyOp(sc)
    // foreach: iterate over the elements
    //foreachOp(sc)
    sc.stop()
  }

  /** reduce: sums all elements of the RDD and prints the result. */
  def reduceOp(sc: SparkContext): Unit = {
    val dataRdd = sc.parallelize(Array(1, 2, 3, 4, 5, 6))
    val sum = dataRdd.reduce(_ + _)
    println(sum)
  }

  /** collect: materializes all RDD elements on the driver and prints each one. */
  def collectOp(sc: SparkContext): Unit = {
    val dataRdd = sc.parallelize(Array(1, 2, 3, 4, 5, 6))
    dataRdd.collect().foreach(println)
  }

  /** take(n): prints the first three elements of the RDD. */
  def takeOp(sc: SparkContext): Unit = {
    val dataRdd = sc.parallelize(Array(1, 2, 3, 4, 5, 6))
    dataRdd.take(3).foreach(println)
  }

  /** count: prints the total number of elements in the RDD. */
  def countOp(sc: SparkContext): Unit = {
    val dataRdd = sc.parallelize(Array(1, 2, 3, 4, 5, 6))
    val count = dataRdd.count()
    println(count)
  }

  /** saveAsTextFile: writes the RDD to the given HDFS path. */
  def saveAsTextFileOp(sc: SparkContext): Unit = {
    val dataRdd = sc.parallelize(Array(1, 2, 3, 4, 5, 6))
    // Just point at an HDFS path; the target directory need not exist beforehand.
    // NOTE(review): Spark fails the save if the output directory already exists — rerunning
    // this requires deleting /out1111 first.
    dataRdd.saveAsTextFile("hdfs://hadoop101:9000/out1111")
  }

  /** countByKey: counts how many times each country code appears and prints key:count pairs. */
  def countByKeyOp(sc: SparkContext): Unit = {
    val dataRdd = sc.parallelize(Array(("15001", "US"), ("15002", "CN"), ("15003", "CN"), ("15004", "USA"), ("15005", "US")))
    // Swap (id, country) -> (country, id) so countByKey groups by country code.
    val res = dataRdd.map(tup => (tup._2, tup._1)).countByKey()
    for ((key, count) <- res) {
      println(s"$key:$count")
    }
  }

  /** Builds a local-mode SparkContext for this demo. */
  private def getSparkContext: SparkContext = {
    val conf = new SparkConf()
      // Fixed: was "TransformationOpsScala", a copy/paste slip from a sibling demo —
      // the app name shown in the Spark UI/logs should match this object.
      .setAppName("ActionOpsScala")
      .setMaster("local")
    new SparkContext(conf)
  }
}
