package com.avicit.zipkinkafkaesserver.sparkapi;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;

import java.util.List;

/**
 * 简单操作spark 类库
 */
/**
 * Simple Spark helper that runs basic aggregation jobs (occurrence count,
 * top-N, per-key average) over a data set supplied by a subclass.
 *
 * <p>Each public method builds its own {@link JavaSparkContext} from the
 * injected {@link SparkConf} and closes it when the job finishes, so the
 * instance itself holds no open Spark resources between calls.
 */
public abstract class SparkDoCount {

    private final SparkConf sparkConf;

    public SparkDoCount(SparkConf sparkConf) {
        this.sparkConf = sparkConf;
    }

    /**
     * Supplies the raw records to aggregate.
     *
     * <p>NOTE(review): the return type is raw because each operation expects a
     * different element type — {@code String} for {@link #doCountResult()},
     * {@code Tuple2<String, Long>} for {@link #topN(int)} and
     * {@code Tuple2<String, Double>} for {@link #averageForDataList()}.
     * Callers must return the element type matching the operation they invoke.
     *
     * @return the raw data set to process
     */
    protected abstract List dataList();

    /**
     * Counts occurrences of each distinct string in {@link #dataList()}.
     *
     * @return one (value, occurrence-count) tuple per distinct string
     */
    public List<Tuple2<String, Integer>> doCountResult() {
        // try-with-resources closes the context even if the job throws,
        // which the original explicit close() on the success path did not.
        try (JavaSparkContext javaSparkContext = new JavaSparkContext(sparkConf)) {
            List<String> dataList = dataList();
            JavaRDD<String> stringRDD = javaSparkContext.parallelize(dataList);
            // Map each value to (value, 1), then sum the ones per key.
            return stringRDD
                    .mapToPair(k -> new Tuple2<>(k, 1))
                    .reduceByKey(Integer::sum)
                    .collect();
        }
    }

    /**
     * Returns the {@code topN} tuples with the largest second element.
     *
     * @param topN maximum number of results to return
     * @return at most {@code topN} tuples, sorted by value in descending order
     */
    public List<Tuple2<String, Long>> topN(int topN) {
        try (JavaSparkContext javaSparkContext = new JavaSparkContext(sparkConf)) {
            JavaRDD<Tuple2<String, Long>> rdd = javaSparkContext.parallelize(dataList());
            // ascending=false -> descending sort; keep the source partition count.
            return rdd
                    .sortBy((Function<Tuple2<String, Long>, Long>) v1 -> v1._2,
                            false,
                            rdd.getNumPartitions())
                    .take(topN);
        }
    }

    /**
     * Computes the arithmetic mean of the {@code Double} values grouped by key.
     *
     * @return one (key, average) tuple per distinct key
     */
    public List<Tuple2<String, Double>> averageForDataList() {
        List<Tuple2<String, Double>> dataList = dataList();
        try (JavaSparkContext javaSparkContext = new JavaSparkContext(sparkConf)) {
            JavaRDD<Tuple2<String, Double>> rdd = javaSparkContext.parallelize(dataList);
            return rdd
                    .mapToPair((PairFunction<Tuple2<String, Double>, String, Double>) t ->
                            new Tuple2<>(t._1, t._2))
                    .combineByKey(
                            // createCombiner: seed the (sum, count) accumulator
                            score -> new Tuple2<>(score, 1),
                            // mergeValue: fold a value into the accumulator within a partition
                            (acc, score) -> new Tuple2<>(acc._1() + score, acc._2() + 1),
                            // mergeCombiners: combine accumulators across partitions
                            (a, b) -> new Tuple2<>(a._1() + b._1(), a._2() + b._2()))
                    // average = sum / count
                    .mapToPair(e -> new Tuple2<>(e._1(), e._2()._1() / e._2()._2()))
                    .collect();
        }
    }

}
