package org.apache.spark.examples;

/**
 * Created by admin on 2019/3/20.
 */
import org.apache.spark.examples.sql.model.*;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.SparkConf;
import org.apache.spark.storage.StorageLevel;
import org.apache.spark.util.LongAccumulator;
import scala.Tuple2;

import java.util.Arrays;
import java.util.List;

/**
 * Minimal Spark RDD demo: accumulators, map/reduce over text-file line lengths,
 * the closure-capture pitfall, and a word-per-line count written back to HDFS.
 */
public class SimpleApp {
    public static void main(String[] args) {
        System.setProperty("hadoop.home.dir", "C:/hadoop-2.6.0");
        String appName = "spark-demo";
        String master = "local[2]";
        SparkConf conf = new SparkConf().setAppName(appName).setMaster(master);
        JavaSparkContext sc = new JavaSparkContext(conf);

        // Accumulator: the only safe way to aggregate side-effect counts from executors.
        LongAccumulator accum = sc.sc().longAccumulator();
        sc.parallelize(Arrays.asList(1, 2, 3, 4)).foreach(x -> accum.add(x));
        System.out.println("累加器 accum= " + accum.value());

        JavaRDD<String> lines = sc.textFile("hdfs://172.30.17.164:8020/streamsets/text.txt");
//        JavaRDD<String>  lines = sc.textFile("E:\\Salesorder.csv");
//        lines.foreach(s -> System.out.println(s));
//        System.out.println(lines);

        JavaRDD<Integer> lineLengths = lines.map(s -> s.length());
        // Persist BEFORE the first action so the cached partitions are actually reused
        // by later jobs (persist() after reduce() would cache nothing for that first pass).
        lineLengths.persist(StorageLevel.MEMORY_ONLY());
        int totalLength = lineLengths.reduce((a, b) -> a + b);
        System.out.println("总数=〉" + totalLength);

        // Same computation expressed with named Function classes instead of lambdas.
        // Bug fix: reduce the RDD built with GetLength (previously reduced lineLengths,
        // leaving lineLengths_function unused).
        JavaRDD<Integer> lineLengths_function = lines.map(new GetLength());
        int totalLength_function = lineLengths_function.reduce(new Sum());
        System.out.println("length=>" + totalLength_function);

        // Closure-capture pitfall: a driver-local variable mutated inside foreach()
        // is copied to executors, so the driver's copy stays 0 — use an accumulator instead.
        int counter = 0;
        List<Integer> data = Arrays.asList(1, 2, 3, 4, 5);
        JavaRDD<Integer> rdd = sc.parallelize(data);
//        rdd.foreach(x -> counter += x);
        System.out.println("Counter value: " + counter);

        // Key/value RDDs are represented by JavaPairRDD; diamond operator avoids the raw Tuple2.
        JavaPairRDD<String, Integer> pairs = lines.mapToPair(s -> new Tuple2<>(s, 1));

        // reduceByKey counts how many times each line occurs in the file.
        JavaPairRDD<String, Integer> counts = pairs.reduceByKey((a, b) -> a + b);
        // sortByKey() orders pairs alphabetically; collect() brings them back to the driver.
        JavaPairRDD<String, Integer> sortByKey = counts.sortByKey();
        System.out.println(sortByKey.collect()); // print the data
        System.out.println(sortByKey.count());   // number of distinct lines
        System.out.println(sortByKey.first() + "   " + sortByKey.take(2));
//        sortByKey.saveAsHadoopFile();
        sortByKey.saveAsTextFile("hdfs://172.30.17.164:8020/streamsets/sparktext.txt/");
        sc.stop();
    }
}
