package com.zxj;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.spark.SparkContext;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.SparkSession;
import scala.Tuple2;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

/**
 * A minimal Spark-driven distributed copy (distcp) tool.
 *
 * <p>The driver walks the source tree, mirrors the directory structure on the
 * target, and builds a list of (sourceFile, targetFile) pairs. The pairs are
 * parallelized into an RDD and each executor task copies one file with
 * {@link FileUtil#copy}.
 *
 * <p>Arguments: {@code <source> <target> <ignoreErrors:true|false> <taskNum>}.
 */
public class DistCp {

    public static void main(String[] args) throws IOException {
        if (args.length < 4) {
            // The check requires 4 arguments, so the usage message must list all of them
            // (the old message only mentioned source and target).
            System.err.println("Usage: DistCp <source> <target> <ignoreErrors:true|false> <taskNum>");
            System.exit(1);
        }

        // Source directory
        String source = args[0];
        // Target directory
        String target = args[1];
        // Whether to ignore copy errors (log and continue) instead of failing the job
        boolean ignore = Boolean.parseBoolean(args[2]);
        // Maximum number of concurrent copy tasks (RDD partition count).
        // Parse eagerly so a malformed argument fails before any Spark/HDFS work.
        int taskNum = Integer.parseInt(args[3]);

        SparkSession spark = SparkSession
                .builder()
                .appName("DistCp-zxj")
                .getOrCreate();

        SparkContext sparkContext = spark.sparkContext();

        JavaSparkContext jsc = new JavaSparkContext(sparkContext);

        FileSystem fs = FileSystem.get(sparkContext.hadoopConfiguration());

        // All (source, target) file pairs that need to be copied; directories
        // are created on the driver side while walking the tree.
        List<Tuple2<String, String>> fileList = mkdirDir(fs, source, target, ignore);

        JavaPairRDD<String, String> rdd = jsc.parallelizePairs(fileList, taskNum);

        // foreach is an action: it triggers the copy job. Do NOT follow it with
        // collect() — that would re-run the whole job a second time.
        rdd.foreach(tup -> {
            // Configuration and FileSystem must be created on the executor;
            // driver-side instances are not serializable (java.io.NotSerializableException).
            Configuration hadoopConf = new Configuration();
            Path src = new Path(tup._1());
            Path dst = new Path(tup._2());
            try (
                    // newInstance() returns non-cached FileSystem handles that the
                    // caller owns — close them to avoid leaking connections per task.
                    FileSystem srcFs = FileSystem.newInstance(src.toUri(), hadoopConf);
                    FileSystem dstFs = FileSystem.newInstance(dst.toUri(), hadoopConf)
            ) {
                // overwrite=false: an existing target file causes an IOException,
                // which is either logged (ignore=true) or fails the task.
                FileUtil.copy(srcFs, src, dstFs, dst, false, hadoopConf);
            } catch (IOException e) {
                if (ignore) {
                    e.printStackTrace();
                } else {
                    throw e;
                }
            }
        });

        spark.close();
    }

    /**
     * Recursively walks {@code source}, creating the mirrored directory layout
     * under {@code target}, and returns the (sourceFile, targetFile) pairs for
     * every regular file found.
     *
     * @param fs     driver-side file system used for listing and mkdirs
     * @param source source directory to walk
     * @param target target directory to mirror into
     * @param ignore when true, listing/mkdir errors are logged and the subtree
     *               is skipped instead of aborting the whole job
     * @return all file pairs that still need to be copied
     * @throws IOException if listing fails and {@code ignore} is false
     */
    private static List<Tuple2<String, String>> mkdirDir(FileSystem fs, String source, String target, boolean ignore) throws IOException {
        List<Tuple2<String, String>> fileList = new ArrayList<>();
        try {
            FileStatus[] fileStatuses = fs.listStatus(new Path(source));
            for (FileStatus status : fileStatuses) {
                String name = status.getPath().getName();
                // Directories: create the mirror directory and recurse.
                // Files: record the (source, target) pair for the copy job.
                if (status.isDirectory()) {
                    fs.mkdirs(new Path(target + "/" + name));
                    List<Tuple2<String, String>> subFileList = mkdirDir(fs, status.getPath().toString(), target + "/" + name, ignore);
                    fileList.addAll(subFileList);
                } else {
                    Tuple2<String, String> tuple2 = new Tuple2<>(status.getPath().toString(), target + "/" + name);
                    fileList.add(tuple2);
                }
            }
        } catch (IOException e) {
            if (ignore) {
                e.printStackTrace();
            } else {
                throw e;
            }
        }
        return fileList;
    }

}
