package TestRDDConnectionsAndSparkReadsAndWritesFiles.ReadsAndWritesFiles;

import org.apache.hadoop.conf.Configuration;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;


import java.util.Arrays;
import java.util.List;

public class Writes {
    /**
     * Demonstrates persisting an RDD as a plain text file with
     * {@code saveAsTextFile}. The method accepts a storage path that may be
     * either an HDFS URI or a local-filesystem path; Spark writes one
     * part-NNNNN file per partition under that directory.
     */
    public static void main(String[] args) {
        SparkConf sparkConf = new SparkConf().setAppName("Writes").setMaster("local");
        // FIX: the original never stopped the context (resource leak on every run).
        // JavaSparkContext implements Closeable, so try-with-resources guarantees
        // sc.stop() even if the job throws.
        try (JavaSparkContext sc = new JavaSparkContext(sparkConf)) {
            // HDFS namenode address; saveAsTextFile throws if /out/out1 already exists.
            String path = "hdfs://169.254.0.100:9000";
            String outputPath = path + "/out/out1";

            List<String> strings = Arrays.asList("data1", "data2", "data3");
            JavaRDD<String> rdd1 = sc.parallelize(strings);

            rdd1.saveAsTextFile(outputPath);
        }
    }
}
