package com.study.utils

import org.apache.hadoop.fs.{FileSystem, LocatedFileStatus, Path, RemoteIterator}
import org.apache.spark.sql.SparkSession

class HadoopFS {

  // Mutable holders filled in by the auxiliary constructor; they stay null
  // when the no-arg constructor is used. NOTE(review): prefer requiring the
  // SparkSession in the primary constructor so these can be immutable vals.
  var fs : FileSystem = _
  var spark:SparkSession = _

  /**
   * Binds this wrapper to the Hadoop FileSystem derived from the given
   * session's Hadoop configuration.
   *
   * @param spark active SparkSession whose hadoopConfiguration is used
   */
  def this(spark:SparkSession){
    this()
    this.spark = spark
    this.fs = FileSystem.get(spark.sparkContext.hadoopConfiguration)
  }

  /**
   * Renames (moves) an HDFS path.
   *
   * @param filename    source path
   * @param newFilename destination path
   * @return true if the rename succeeded
   */
  def mv(filename:String,newFilename:String):Boolean = {
    // BUG FIX: previously the destination was new Path(filename, newFilename),
    // which resolves newFilename as a CHILD of the source path, so the file
    // was moved to the wrong location. The destination must stand on its own.
    fs.rename(new Path(filename),new Path(newFilename))
  }

  /**
   * Checks whether a path exists on the file system.
   *
   * @param filename path to test
   * @return true if the path exists
   */
  def exists(filename:String):Boolean = {
    fs.exists(new Path(filename))
  }

  /**
   * Deletes a path.
   *
   * @param filename  path to delete
   * @param recursive delete directory contents recursively; defaults to true
   *                  to preserve the original behavior
   * @return true if the delete succeeded
   */
  def rm(filename:String, recursive:Boolean = true):Boolean = {
    fs.delete(new Path(filename), recursive)
  }

  /**
   * Derives the temporary-file name for a path by appending ".tmp".
   *
   * @param filename base path
   * @return the base path with a ".tmp" suffix
   */
  def tmpFile(filename:String):String = {
    filename + ".tmp"
  }

  /**
   * Lists the files under an HDFS directory.
   *
   * @param dir       directory to list
   * @param recursive descend into subdirectories when true
   * @return a remote iterator over the located file statuses
   */
  def list(dir:String,recursive:Boolean): RemoteIterator[LocatedFileStatus] = {
    fs.listFiles(new Path(dir),recursive)
  }

}

object HadoopFS {

  /**
   * Factory shortcut so callers can write `HadoopFS(spark)` instead of
   * `new HadoopFS(spark)`.
   *
   * @param spark active SparkSession used to resolve the Hadoop FileSystem
   * @return a HadoopFS bound to the session's Hadoop configuration
   */
  def apply(spark: SparkSession): HadoopFS = {
    val instance = new HadoopFS(spark)
    instance
  }

}
