package core.rdd.instance;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Comparator;
import java.util.List;
import java.util.stream.Stream;

/**
 * Reads a local text file into an RDD, re-partitions it into 5 partitions,
 * and saves it as text files under the local "out" directory, deleting any
 * previous output first (saveAsTextFile fails if the target already exists).
 */
public class Spark01_FILE_SAVE_FENPIAN {
    public static void main(String[] args) {
        // Configure Spark with the application name and master URL.
        SparkConf conf = new SparkConf()
                .setAppName("Spark01_FILE_SAVE_FENPIAN") // application name
                .setMaster("local[*]"); // replace with your cluster's master address

        // JavaSparkContext is the main entry point for interacting with the cluster.
        // try-with-resources guarantees the context is stopped on exit.
        try (JavaSparkContext sc = new JavaSparkContext(conf)) {
            // Load the input file as an RDD of lines.
            JavaRDD<String> rdd = sc.textFile("D:\\Study\\personal-learning\\data\\spark1.txt");

            // Collect to the driver and re-parallelize into 5 partitions so the
            // output below is written as 5 part files.
            List<String> collect = rdd.collect();
            JavaRDD<String> parallelize = sc.parallelize(collect, 5);

            Path dirPath = Paths.get("out");

            try {
                // saveAsTextFile refuses to write into an existing directory,
                // so remove any previous output first.
                if (Files.exists(dirPath)) {
                    // Files.walk returns a lazily-populated Stream backed by open
                    // directory handles; it must be closed, hence try-with-resources.
                    try (Stream<Path> walk = Files.walk(dirPath)) {
                        walk.sorted(Comparator.reverseOrder()) // delete children before parents
                                .forEach(path -> {
                                    // File.delete() reports failure via its return value;
                                    // fail loudly instead of letting the later save
                                    // fail with a confusing error.
                                    if (!path.toFile().delete()) {
                                        throw new RuntimeException("Failed to delete " + path);
                                    }
                                });
                    }
                }

                parallelize.saveAsTextFile("out");
            } catch (IOException e) {
                // Rethrow with context, preserving the original cause.
                throw new RuntimeException("Failed to clean output directory " + dirPath, e);
            }
        }
    }
}
