package com.cike.sparkstudy.core.java;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.Function2;
import scala.Tuple2;

import java.util.Arrays;
import java.util.List;
import java.util.Map;

/**
 * Demonstrates common Spark action operators on the Java API:
 * {@code reduce}, {@code collect}, {@code count}, {@code take} and {@code countByKey}.
 *
 * <p>NOTE(review): every demo method calls {@code sc.close()} on the shared context,
 * so only ONE of the calls in {@link #main} can be active at a time — the others are
 * intentionally commented out.
 */
public class ActionOperation {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("ActionOperation").setMaster("local");

        JavaSparkContext sc = new JavaSparkContext(conf);

        // Enable exactly one demo at a time: each method closes the context when done.
        //reduce(sc);
        //collect(sc);
        //count(sc);
        //take(sc);
        // saveAsTextFile();
        countByKey(sc);
    }

    /**
     * Aggregates a collection with the {@code reduce} action.
     *
     * @param sc the Spark context; closed by this method before returning
     */
    public static void reduce(JavaSparkContext sc) {
        List<Integer> numberList = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
        JavaRDD<Integer> numbers = sc.parallelize(numberList);

        /**
         * Use the reduce operator to sum the elements of the collection.
         *      How it works: the first and second elements are passed to call() and
         *      combined (1+2=3); that result is then combined with the next element
         *      (3+3=6), and so on until one value remains.
         */
        Integer sum = numbers.reduce(new Function2<Integer, Integer, Integer>() {
            @Override
            public Integer call(Integer v1, Integer v2) throws Exception {
                return v1 + v2;
            }
        });

        System.out.println("集合的总和是："+sum);

        sc.close();
    }

    /**
     * Doubles every element, then pulls the results back to the driver with
     * {@code collect} and prints them.
     *
     * @param sc the Spark context; closed by this method before returning
     */
    public static void collect(JavaSparkContext sc) {
        List<Integer> numberList = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
        JavaRDD<Integer> numbers = sc.parallelize(numberList);

        JavaRDD<Integer> doubleNumbersRDD = numbers.map(new Function<Integer, Integer>() {
            @Override
            public Integer call(Integer v1) throws Exception {
                return v1 * 2;
            }
        });

        /**
         * Iterate over the result. collect() pulls the remote data to the driver for
         * local processing, which is generally discouraged (it can overwhelm the
         * driver for large datasets); foreach() is usually preferred.
         */
        List<Integer> collectNumbers = doubleNumbersRDD.collect();
        for (Integer num : collectNumbers) {
            System.out.println(num);
        }

        sc.close();
    }

    /**
     * Counts the number of elements in a collection with the {@code count} action.
     *
     * @param sc the Spark context; closed by this method before returning
     */
    public static void count(JavaSparkContext sc) {
        List<Integer> numberList = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
        JavaRDD<Integer> numbers = sc.parallelize(numberList);

        long count = numbers.count();

        System.out.println("元素个数总数是："+count);

        sc.close();
    }

    /**
     * Fetches the first N elements of an RDD with the {@code take} action.
     *
     * @param sc the Spark context; closed by this method before returning
     */
    public static void take(JavaSparkContext sc) {
        List<Integer> numberList = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
        JavaRDD<Integer> numbers = sc.parallelize(numberList);

        List<Integer> take4Numbers = numbers.take(4);
        for (Integer num : take4Numbers) {
            System.out.println(num);
        }

        sc.close();
    }

    /**
     * Counts the number of students per class with the {@code countByKey} action.
     *
     * @param sc the Spark context; closed by this method before returning
     */
    public static void countByKey(JavaSparkContext sc) {
        // Mock data set: (class, student) pairs.
        List<Tuple2<String, String>> scoreList = Arrays.asList(
                new Tuple2<String, String>("class1", "leo"),
                new Tuple2<String, String>("class2", "jack"),
                new Tuple2<String, String>("class1", "marry"),
                new Tuple2<String, String>("class2", "tom"),
                new Tuple2<String, String>("class2", "david"));

        // Parallelize the collection into a JavaPairRDD.
        JavaPairRDD<String, String> students = sc.parallelizePairs(scoreList);

        // Apply countByKey to count the students in each class, i.e. the number of
        // elements per key — that is exactly what countByKey does.
        // In this Spark Java API version countByKey returns Map<String, Object>.
        Map<String, Object> studentsMaps = students.countByKey();

        for (Map.Entry<String, Object> studentMap : studentsMaps.entrySet()) {
            System.out.println(studentMap.getKey() + ":" + studentMap.getValue());
        }

        sc.close();
    }
}
