package org.geek.spark.test

import java.util.concurrent.Executors

import scala.collection.mutable.ListBuffer
import scala.concurrent.Await
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.concurrent.duration.{ Duration, DurationInt }
import scala.util.control.NonFatal

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.fs.Path
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD

/**
 * Usage: SparkDistCp [-i] [-m N] hdfs://xxx/source hdfs://xxx/target
 *
 * Copies the directory hdfs://xxx/source into hdfs://xxx/target using Spark
 * tasks, producing hdfs://xxx/target/source. Nested subdirectories under the
 * source are supported. Flags:
 *   -i   ignore copy failures and continue with the remaining files
 *   -m N maximum number of concurrent copy tasks (partitions per file)
 */
object SparkDistCp {
  // Parsed command-line options, keyed by option name.
  type OptionMap = Map[Symbol, Any]

  def main(args: Array[String]): Unit = {
    if (args.isEmpty) {
      println("check usage example:SparkDistCp -i -m 4 input output")
      // Bug fix: previously execution fell through here and later crashed
      // with NoSuchElementException instead of exiting after the usage hint.
      sys.exit(1)
    }
    val options = nextOption(Map(), args.toList)
    println(options)

    val sourceFolder = String.valueOf(options(Symbol("infile")))
    val targetFolder = String.valueOf(options(Symbol("outfile")))
    // Defaults so that omitting the optional -m / -i flags no longer throws
    // NoSuchElementException (the header documents both as optional).
    val concurrency = options.getOrElse(Symbol("maxconcurrency"), 1).toString.toInt
    val ignoreFailure = options.getOrElse(Symbol("ignoreFailure"), 0).toString.toInt

    val sparkConf = new SparkConf().setAppName("SparkDistCp").setMaster("local[1]")
    val sc = new SparkContext(sparkConf)
    val fileNames = new ListBuffer[String]()

    // Collect every regular file below sourceFolder, then copy each one,
    // rewriting the source prefix of its path into the target prefix.
    val conf = new Configuration()
    try {
      traverseDir(conf, sourceFolder, fileNames)
      fileNames.foreach { fileName =>
        try {
          sc.textFile(fileName, concurrency).saveAsTextFile(fileName.replace(sourceFolder, targetFolder))
        } catch {
          // NonFatal instead of Throwable: let OOM / interrupts propagate.
          case NonFatal(t) =>
            t.printStackTrace()
            if (ignoreFailure == 0) {
              throw new Exception("failed to copy " + fileName)
            }
        }
      }
    } finally {
      sc.stop() // release the SparkContext even when a copy fails
    }
  }

  /**
   * Recursively folds the argument list into an [[OptionMap]].
   *
   * Recognized forms: `-i` (sets ignoreFailure = 1), `-m N` (sets
   * maxconcurrency = N), the last positional argument becomes `outfile`,
   * any earlier positional argument becomes `infile`. An unknown switch
   * aborts the program.
   *
   * @param map  options accumulated so far
   * @param list remaining command-line arguments
   * @return the completed option map
   */
  def nextOption(map: OptionMap, list: List[String]): OptionMap = {
    // Guard against empty strings before indexing the first character.
    def isSwitch(s: String) = s.nonEmpty && s(0) == '-'
    list match {
      case Nil => map
      case "-i" :: tail => nextOption(map ++ Map(Symbol("ignoreFailure") -> 1), tail)
      case "-m" :: value :: tail =>
        nextOption(map ++ Map(Symbol("maxconcurrency") -> value.toInt), tail)
      // Bug fix: this case must precede the positional-argument cases below;
      // in the original it came last and was unreachable, so unknown switches
      // were silently consumed as file paths.
      case option :: _ if isSwitch(option) =>
        println("Unknown option " + option)
        sys.exit(1)
      case string :: Nil  => nextOption(map ++ Map(Symbol("outfile") -> string), Nil)
      case string :: tail => nextOption(map ++ Map(Symbol("infile") -> string), tail)
    }
  }

  /**
   * Recursively collects the full paths of all regular files under `path`.
   *
   * @param hdconf    Hadoop configuration used to resolve the FileSystem
   * @param path      directory (or file) to scan
   * @param filePaths accumulator receiving each file path found
   */
  def traverseDir(hdconf: Configuration, path: String, filePaths: ListBuffer[String]): Unit = {
    val fs = FileSystem.get(hdconf)
    fs.listStatus(new Path(path)).foreach { status =>
      if (status.isDirectory) traverseDir(hdconf, status.getPath.toString, filePaths)
      else filePaths += status.getPath.toString
    }
  }
}
