package cn.tranq

import org.apache.commons.cli.{BasicParser, HelpFormatter, OptionBuilder, Options, Option => CliOption}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, FileUtil, Path}
import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession

import scala.collection.mutable.ListBuffer

object App {

    /** Runtime options for the distributed copy.
      *
      * @param ignoreFailures when true, individual mkdir/copy errors are logged
      *                       and skipped instead of aborting the whole job
      * @param maxConcurrence number of RDD partitions, i.e. the upper bound on
      *                       concurrently copying tasks (CLI default: 4)
      */
    final case class Config(ignoreFailures: Boolean, maxConcurrence: Int)

  /** Entry point: parse CLI options, walk the source tree, then copy in parallel. */
  def main(args: Array[String]): Unit = {
    val (config, positional) = getConfig(args)
    // getConfig already printed the usage/help message on failure.
    if (config == null) return

    val spark = SparkSession.builder().appName("distcp").getOrCreate()
    val sc = spark.sparkContext

    // Accumulate (sourceFile, targetDirectory) pairs, mirroring directories as we go.
    val pending = new ListBuffer[(Path, Path)]
    checkDir(sc, new Path(positional(0)), new Path(positional(1)), config, pending)

    copy(sc, pending, config)
    sc.stop()
  }

  /** Recursively walks `sourcePath`, mirroring its directory structure under
    * `targetPath` and appending every regular file to `fileList` as a
    * (sourceFile, targetDirectory) pair for the later copy stage.
    *
    * @param sc       used only to obtain the Hadoop configuration
    * @param config   ignoreFailures controls whether a mkdir error aborts the walk
    * @param fileList output accumulator of (source file, target directory) pairs
    */
  def checkDir(sc: SparkContext, sourcePath: Path, targetPath: Path,
               config: Config, fileList: ListBuffer[(Path, Path)]): Unit = {
    val fs = FileSystem.get(sc.hadoopConfiguration)
    fs.listStatus(sourcePath).foreach { src =>
      if (src.isDirectory) {
        // Relative sub-path of this directory under the source root.
        // BUG FIX: the original used String.split(sourcePath.toString)(1), which
        // interprets the path as a REGEX (breaks on paths containing '+', '.',
        // '(' ...) and throws ArrayIndexOutOfBounds when there is no remainder;
        // stripPrefix is a literal, total replacement with the same result.
        val subPath = src.getPath.toString.stripPrefix(sourcePath.toString)
        val targetSubPath = new Path(targetPath + subPath)
        try {
          fs.mkdirs(targetSubPath)
        } catch {
          case e: Exception =>
            if (!config.ignoreFailures) throw e
            else println(s"mkdir '${targetSubPath}' error:", e.getMessage)
        }
        checkDir(sc, src.getPath, targetSubPath, config, fileList)
      } else {
        fileList.append((src.getPath, targetPath))
      }
    }
  }

  /** Copies every (sourceFile, targetDirectory) pair in parallel on the cluster.
    *
    * The file list is spread over up to `config.maxConcurrence` partitions; each
    * executor task builds its own Hadoop Configuration (Configuration is not
    * serializable, so it cannot be captured from the driver) and copies its
    * slice of the files.
    *
    * Re-raises the first copy failure on an executor unless
    * `config.ignoreFailures` is set, in which case the error is printed and
    * the remaining files are still attempted.
    */
  def copy(sc: SparkContext, fileList: ListBuffer[(Path, Path)], config: Config): Unit = {
    val rdd = sc.makeRDD(fileList, config.maxConcurrence)
    // BUG FIX: the original had a stray dangling `sc.` before this call (which
    // parsed as the nonexistent member `sc.rdd` and did not compile), and used
    // mapPartitions(...).collect() purely for side effects while returning the
    // already-consumed iterator. foreachPartition is the correct action here.
    rdd.foreachPartition { it =>
      val hadoopConf = new Configuration()
      it.foreach { case (src, dst) =>
        try {
          // deleteSource = false: plain copy, never a move.
          FileUtil.copy(src.getFileSystem(hadoopConf), src, dst.getFileSystem(hadoopConf), dst, false, hadoopConf)
        } catch {
          case e: Exception =>
            if (!config.ignoreFailures) throw e
            else println(s"copy ${src.getName} error:", e.getMessage)
        }
      }
    }
  }


  /** Parses command-line arguments.
    *
    * Recognised options:
    *   -i       ignore individual mkdir/copy failures (log and continue)
    *   -m <n>   maximum concurrence, i.e. RDD partition count (default 4)
    *
    * @return (config, positionalArgs) on success; (null, null) after printing
    *         the usage message when parsing fails or the two required
    *         positional arguments (source dir, target dir) are missing.
    *         Callers must null-check the first element.
    */
  def getConfig(args: Array[String]): (Config, Array[String]) = {
    val ignoreFailure = new CliOption("i", "ignore failures")
    val maxConcurrence = OptionBuilder.create("m")
    maxConcurrence.setArgName("m")
    maxConcurrence.setArgs(1)

    val options = new Options()
    options.addOption(ignoreFailure)
    options.addOption(maxConcurrence)

    try {
      val cmd = new BasicParser().parse(options, args)
      // ROBUSTNESS: main indexes args(0)/args(1); fail fast with the usage
      // message instead of an ArrayIndexOutOfBoundsException later. A
      // non-numeric -m value also lands in the catch below (NumberFormatException).
      if (cmd.getArgs.length < 2)
        throw new IllegalArgumentException("source and target directories are required")
      (Config(cmd.hasOption("i"), cmd.getOptionValue("m", "4").toInt), cmd.getArgs)
    } catch {
      case e: Exception =>
        val help = new HelpFormatter()
        println(e)
        help.printHelp("java distcp.jar [options] <source-dir> <target-dir>", options)
        (null, null)
    }
  }

  /** Recursively lists regular files under `path` and returns, for each file,
    * the name of its PARENT directory (not the file path itself).
    *
    * NOTE(review): returning getParent.getName is surprising for a method named
    * listFiles — confirm callers really want parent-directory names rather than
    * full file paths before changing it.
    */
  def listFiles(path: String, sc: SparkContext): Array[String] = {
    val fs = FileSystem.get(sc.hadoopConfiguration)
    val itr = fs.listFiles(new Path(path), true)
    val list = new ListBuffer[String]

    while (itr.hasNext) {
      val fileStatus = itr.next()
      if (fileStatus.isFile) {
        list.append(fileStatus.getPath.getParent.getName)
      }
    }
    // BUG FIX: do not close the FileSystem here — FileSystem.get returns a
    // JVM-wide cached instance, so closing it would break every other user of
    // the same filesystem (e.g. checkDir/copy) for the rest of the application.
    list.toArray
  }
}
