package org.zjt.demo;

import org.apache.spark.api.java.function.Function2;
import scala.Serializable;
import scala.Tuple2;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;

import java.util.Arrays;
import java.util.Map;
import java.util.regex.Pattern;

public final class TestApi {
    private static final Pattern SPACE = Pattern.compile(",");
    private static final String FILE = "test.txt";


    public static void main(String[] args) throws Exception {
        // Entry point: runs the reduce/foreach/take/countByValue demonstration.
        wordContentOthers();
    }


    /**
     * Counts how often each comma-separated token occurs in {@code FILE}.
     */
    public static void wordCount() {
        new TestHandler<JavaPairRDD<String, Integer>>().executor(
                javaSparkContext -> {
                    // 1. Read the file and split every line into individual tokens.
                    JavaRDD<String> tokens = javaSparkContext
                            .textFile(FILE)
                            .flatMap(line -> Arrays.asList(SPACE.split(line)).iterator());

                    // 2. Pair each token with an initial count of 1,
                    // 3. then sum the counts per key.
                    return tokens
                            .mapToPair(token -> new Tuple2<>(token, 1))
                            .reduceByKey(Integer::sum);
                }
        );
    }


    /**
     * Demonstrates {@code filter}: counts only the tokens containing the
     * letter "a".
     */
    public static void wordFilterCount() {
        new TestHandler<JavaPairRDD<String, Integer>>().executor(
                javaSparkContext -> {
                    // 1. Read the file and split every line into tokens.
                    JavaRDD<String> tokens = javaSparkContext
                            .textFile(FILE)
                            .flatMap(line -> Arrays.asList(SPACE.split(line)).iterator());

                    // 2. Keep only tokens that contain the letter "a",
                    // 3. pair each with an initial count of 1,
                    // 4. and sum the counts per key.
                    return tokens
                            .filter(token -> token.contains("a"))
                            .mapToPair(token -> new Tuple2<>(token, 1))
                            .reduceByKey(Integer::sum);
                }
        );
    }


    /**
     * Demonstrates {@code map}: upper-cases every token before counting.
     */
    public static void wordMapCount() {
        new TestHandler<JavaPairRDD<String, Integer>>().executor(
                javaSparkContext -> {
                    // 1. Read the file and split every line into tokens.
                    JavaRDD<String> tokens = javaSparkContext
                            .textFile(FILE)
                            .flatMap(line -> Arrays.asList(SPACE.split(line)).iterator());

                    // 2. Convert each token to upper case,
                    // 3. pair each with an initial count of 1,
                    // 4. and sum the counts per key.
                    return tokens
                            .map(String::toUpperCase)
                            .mapToPair(token -> new Tuple2<>(token, 1))
                            .reduceByKey(Integer::sum);
                }
        );
    }


    /**
     * Demonstrates the {@code count()} and {@code first()} actions.
     *
     * <p>NOTE(review): each action triggers its own Spark job, so this method
     * evaluates the upstream pipeline multiple times (count, first, and the
     * final reduceByKey handled by the caller).
     */
    public static void wordCountFirstCount() {
        new TestHandler<JavaPairRDD<String, Integer>>().executor(
                javaSparkContext -> {
                    JavaRDD<String> lines = javaSparkContext.textFile(FILE);

                    /** 1. Split every line of the file into individual tokens. */
                    JavaRDD<String> words = lines.flatMap(s -> Arrays.asList(SPACE.split(s)).iterator());

                    /** 2. Convert each token to upper case. */
                    JavaRDD<String> filters = words.map(String::toUpperCase);

                    /** 3. Pair each token with an initial count of 1. */
                    JavaPairRDD<String, Integer> ones = filters.mapToPair(s -> new Tuple2<>(s, 1));

                    // Fixed format string: the original "first:/%s" printed a stray '/'.
                    System.out.println(String.format("count:%s\tfirst:%s", ones.count(), ones.first()));

                    /** 4. Sum the counts per key. */
                    return ones.reduceByKey((i1, i2) -> i1 + i2);
                }
        );
    }


    /**
     * Demonstrates {@code javaSparkContext.parallelize()}: builds the RDD from
     * an in-memory list instead of a file, then counts upper-cased tokens.
     */
    public static void wordContentCount() {
        new TestHandler<JavaPairRDD<String, Integer>>().executor(
                javaSparkContext -> {

                    /** Build the source RDD from a local string via parallelize(). */
                    String content = "zhang,jun,tao,zhang,li,wang,li,zhang,jun,tao\n";
                    JavaRDD<String> lines = javaSparkContext.parallelize(Arrays.asList(content.split(",")));

                    // Re-split (no-op for single tokens), upper-case, pair with 1, sum per key.
                    return lines
                            .flatMap(s -> Arrays.asList(SPACE.split(s)).iterator())
                            .map(String::toUpperCase)
                            .mapToPair(token -> new Tuple2<>(token, 1))
                            .reduceByKey(Integer::sum);
                }
        );
    }

    /**
     * Squares each element of an in-memory integer list.
     */
    public static void wordContentSquare() {
        new TestHandler<JavaRDD<Integer>>().executor(
                javaSparkContext -> {
                    /** Build the RDD from a local collection via parallelize(). */
                    JavaRDD<Integer> numbers = javaSparkContext.parallelize(Arrays.asList(1, 2, 3, 43, 42));

                    // Map each value to its square.
                    return numbers.map(n -> n * n);
                }
        );
    }


    /**
     * Demonstrates {@code aggregate()} by accumulating the count and sum needed
     * to compute an average.
     *
     * <p>PS: aggregate (like reduce) collapses the RDD to a single result.
     */
    public static void wordContentAvg() {
        new TestHandler<AvgNum>().executor(
                javaSparkContext -> {
                    JavaRDD<Integer> lines = javaSparkContext.parallelize(Arrays.asList(1, 2, 3, 43, 42));
                    /** 1. Zero value: empty accumulator (count 0, sum 0). */
                    AvgNum zero = new AvgNum(0, 0);

                    /** 2. Seq function: fold one int element into an AvgNum accumulator. */
                    Function2<AvgNum, Integer, AvgNum> addAndCount = (a, elment) -> {
                        a.num += 1;
                        a.total += elment;
                        return a;
                    };

                    /** 3. Comb function: merge two partition-level accumulators. */
                    Function2<AvgNum, AvgNum, AvgNum> combOp = (sum, b) -> {
                        sum.total += b.total;
                        sum.num += b.num;
                        System.out.println("combOp:" + b);
                        return sum;
                    };

                    // Originally the three values above were defined but never used —
                    // aggregate() re-declared identical lambdas inline. Reuse them instead.
                    return lines.aggregate(zero, addAndCount, combOp);
                }
        );
    }






    /**
     * Demonstrates the reduce, foreach, take and countByValue actions.
     *
     * <p>PS: reduce and aggregate both collapse the RDD to a single value.
     */
    public static void wordContentOthers() {
        new TestHandler<JavaRDD<Integer>>().executor(
                javaSparkContext -> {
                    JavaRDD<Integer> lines = javaSparkContext.parallelize(Arrays.asList(1, 2, 2, 3, 43, 43, 42));

                    // take(n) returns the first n elements. The original looped
                    // take(0)..take(count-1), printing growing prefix *lists* and
                    // running O(n^2) Spark jobs; fetch the elements once instead.
                    for (Integer element : lines.take((int) lines.count())) {
                        System.out.printf("   %s", element);
                    }

                    /** Must use a serializable lambda here, not an anonymous Function (would fail). */
                    lines.foreach(a -> System.out.println(a));

                    /** countByValue: occurrence count for each distinct element. */
                    Map<Integer, Long> elmentCount = lines.countByValue();
                    elmentCount.forEach((a, b) -> System.out.println(String.format("key:%s\tvalue:%s", a, b)));

                    /** reduce collapses to a single value; combiner must be side-effect free
                     *  (the original used `a += b`, which mutates the lambda parameter). */
                    Integer sum = lines.reduce((a, b) -> a + b);
                    System.out.println(String.format("sum:%s", sum));

                    // Removed `int j = 1/0;` — it unconditionally threw
                    // ArithmeticException before the return, crashing the demo.
                    return lines;
                }
        );
    }







    /**
     * Mutable accumulator used by the aggregate() example: tracks how many
     * values were folded in ({@code num}) and their running sum
     * ({@code total}). Fields are public because the aggregate lambdas mutate
     * them directly.
     */
    static class AvgNum implements Serializable {
        public Integer num;    // count of accumulated values
        public Integer total;  // running sum of accumulated values
        public Double avg;     // cached result of the last getAvg() call

        /** Creates an accumulator with the given count and sum. */
        public AvgNum(Integer num, Integer total) {
            this.num = num;
            this.total = total;
        }

        /** No-arg constructor (fields stay null until assigned). */
        public AvgNum() {
        }

        /**
         * Computes, caches and returns {@code total / num} as a double.
         * NOTE(review): yields NaN/Infinity when {@code num} is 0 — confirm
         * callers never invoke this on an empty accumulator.
         */
        public Double getAvg() {
            this.avg = this.total / (double) this.num;
            return this.avg;
        }

        @Override
        public String toString() {
            return "AvgNum{" + "num=" + num + ", total=" + total + '}';
        }
    }


}
