package spark_core.operate_action;

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;

/**
 * @author shihb
 * @date 2020/1/9 11:28
 * Demo of Spark RDD action operators (reduce, collect, count, first, take,
 * takeOrdered, aggregate, fold, countByKey, foreach).
 */
public class Demo1 {

  public static void main(String[] args) {

    // Local mode: build a SparkConf describing the deployment environment.
    // Fix: the app name was empty (""), which makes the job anonymous in the
    // Spark UI — give it a meaningful name.
    SparkConf sparkConf =
        new SparkConf().setMaster("local[*]").setAppName("ActionOperatorDemo");
    // Create the Spark context (Java API wrapper).
    JavaSparkContext sc = new JavaSparkContext(sparkConf);

    try {
      JavaRDD<Integer> valueRdd = sc.parallelize(Arrays.asList(1, 4, 3, 2, 6, 5));

      // 1. reduce(): aggregates all elements; the result is a plain value, not an RDD.
      Integer reduce = valueRdd.reduce(Integer::sum);
      System.out.println("聚合结果:" + reduce);

      // 2. collect(): brings every element back to the driver as a list.
      List<Integer> collect = valueRdd.collect();
      System.out.println(collect);

      // 3. count(): number of elements in the RDD.
      long count = valueRdd.count();
      System.out.println("Rdd条数:" + count);

      // 4. first(): the first element.
      Integer first = valueRdd.first();
      System.out.println("Rdd第一个值:" + first);

      // 5. take(n): the first n elements as a list.
      List<Integer> take = valueRdd.take(3);
      System.out.println("take:" + take);

      // 6. takeOrdered(n): the smallest n elements (natural ascending order).
      List<Integer> takeOrder = valueRdd.takeOrdered(3);
      System.out.println("takeOrder:" + takeOrder);

      // 7. aggregate: note that zeroValue is applied once more when merging
      //    across partitions (unlike aggregateByKey, which applies it only
      //    within partitions).
      Integer aggregate = valueRdd.aggregate(0, Integer::sum, Integer::sum);
      System.out.println("aggregate:" + aggregate);

      // 8. fold: shorthand for aggregate when the intra-partition and
      //    inter-partition functions are identical.
      //    Fix: the original printed the fold result twice (duplicated line).
      Integer fold = valueRdd.fold(0, Integer::sum);
      System.out.println("fold:" + fold);

      // 9. saveAsTextFile / saveAsObjectFile persist the RDD to a directory.

      // 10. countByKey(): for a (K, V) RDD, returns a Map<K, Long> with the
      //     element count per key.
      //     Fix: the original computed this but never printed the result.
      Map<String, Long> countByKey =
          valueRdd.mapToPair(i -> new Tuple2<>("a", i)).countByKey();
      System.out.println("countByKey:" + countByKey);

      // 11. foreach: runs the given function on every element (executed on the
      //     executors, so output location depends on the deployment mode).
      valueRdd.foreach(i -> System.out.println(i));
    } finally {
      // Fix: the context was never stopped in the original, leaking the local
      // Spark runtime; always release it, even if a demo step throws.
      sc.stop();
    }
  }

}
