package com.jonas.sparkwork.answer2

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileStatus, FileSystem, FileUtil, Path}
import org.apache.spark.sql.SparkSession

import scala.collection.mutable

/**
 * @author Jonas Gao
 * @since 2022/4/16
 */
object Answer2App {

  /**
   * Entry point for a minimal Spark-based distcp: recursively copies every
   * file and directory under `options.from` to `options.to`, using Spark
   * executors as the copy workers. The list of destination paths written is
   * saved as text to a fixed HDFS location for auditing.
   *
   * @param args command-line arguments, parsed by [[Options.parse]]
   * @throws SourceNotFoundException if the source path does not exist
   */
  def main(args: Array[String]): Unit = {
    val options = Options.parse(args)

    val session = SparkSession
      .builder()
      .appName("Jonas Work / Spark Distcp")
      .getOrCreate()

    try {
      val context = session.sparkContext
      val conf = context.hadoopConfiguration
      // Resolve the filesystem from the source path itself, so fully-qualified
      // paths on a non-default filesystem are handled correctly (FileSystem.get
      // would always return the default filesystem).
      val fs = options.from.getFileSystem(conf)

      if (!fs.exists(options.from)) {
        // Source path does not exist...
        throw new SourceNotFoundException
      }

      val files = FileLister.listAll(fs, options.from)
      val wd = new Path("/")
      val source = options.from.makeQualified(fs.getUri, wd)
      val dist = options.to.makeQualified(fs.getUri, wd)

      // options.max caps the copy parallelism; fall back to the cluster default.
      val parallelism = if (options.max > 0) options.max else context.defaultParallelism

      // Pair every source status with its destination path, preserving the
      // directory layout relative to the source root.
      val pairs = context
        .parallelize(files, parallelism)
        .map(f => {
          val rel = source.toUri.relativize(f.getPath.toUri).getPath
          (f, new Path(dist, rel))
        })

      // Create destination directories in a side-effecting *action*, not inside
      // a filter: Spark may re-evaluate transformations on task retries, and
      // side effects hidden in a filter are easy to miss. Running this job
      // first also guarantees empty directories are replicated before copying.
      pairs
        .filter({ case (f, _) => f.isDirectory })
        .foreach({ case (f, dp) =>
          f.getPath.getFileSystem(new Configuration()).mkdirs(dp, f.getPermission)
        })

      // Second pass: copy regular files only (directories were handled above).
      pairs
        .filter({ case (f, _) => !f.isDirectory })
        .map({ case (f, dp) =>
          // Hadoop Configuration is not serializable, so build one per task.
          val taskConf = new Configuration()
          val sourceFs = f.getPath.getFileSystem(taskConf)
          val distFs = dp.getFileSystem(taskConf)
          // deleteSource = false, overwrite = true
          FileUtil.copy(sourceFs, f, distFs, dp, false, true, taskConf)
          dp
        })
        .saveAsTextFile("hdfs:///user/jonas/distcp_output")
    } finally {
      // Release cluster resources even when listing or copying fails.
      session.stop()
    }
  }
}
