package com.gitee.dufafei.spark.connector.hdfs

import java.io.ByteArrayInputStream
import java.util.Properties

import com.gitee.dufafei.spark.pattern.{Borrow, Logging}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FSDataInputStream, FileSystem, Path}
import org.apache.hadoop.io.IOUtils

/**
 * <dependency>
 * <groupId>org.apache.hadoop</groupId>
 * <artifactId>hadoop-client</artifactId>
 * <version>${hadoop.version}</version>
 * </dependency>
 */
class HdfsClient(func: () => FileSystem) extends Borrow with Serializable {

  // Lazily created so a serialized client rebuilds its FileSystem on first
  // use after deserialization (FileSystem itself is not serializable).
  lazy val fs: FileSystem = func()

  /**
   * Opens a raw input stream for the given path.
   * The CALLER is responsible for closing the returned stream; prefer
   * [[readText]] / [[readProperties]] which close it automatically.
   */
  def readInputStream(path: String): FSDataInputStream = fs.open(new Path(path))

  /**
   * Reads the entire file at `path` as a string.
   * The underlying stream is always closed, even if reading fails.
   */
  def readText(path: String): String = {
    val in = readInputStream(path)
    try scala.io.Source.fromInputStream(in).mkString
    finally in.close()
  }

  /**
   * Loads the file at `path` as a [[java.util.Properties]].
   * The underlying stream is always closed, even if loading fails.
   */
  def readProperties(path: String): Properties = {
    val in = readInputStream(path)
    try {
      val props = new Properties()
      props.load(in)
      props
    } finally in.close()
  }

  /**
   * Appends a single line to an existing file.
   * Both streams are closed by IOUtils.copyBytes (close = true).
   *
   * @param file          target HDFS file (must already exist, append-capable)
   * @param content       text to append; `lineSeparator` is added after it
   * @param lineSeparator separator written after the content
   * @param encode        charset used to encode the text
   * @param buffSize      copy buffer size in bytes
   */
  def append(file: String, content: String,
             lineSeparator: String = "\n", encode: String = "UTF-8", buffSize: Int = 4096): Unit = {
    val in = new ByteArrayInputStream((content + lineSeparator).getBytes(encode))
    val out = fs.append(new Path(file))
    IOUtils.copyBytes(in, out, buffSize, true)
  }

  /**
   * Appends multiple lines to an existing file, opening the output stream
   * only once. Each line is fed through its own in-memory stream; the output
   * stream is closed by `using` after the last line.
   */
  def batchAppend(file: String, content: List[String],
                  lineSeparator: String = "\n", encode: String = "UTF-8", buffSize: Int = 4096): Unit = {
    val out = fs.append(new Path(file))
    using(out) { out =>
      content.foreach { line =>
        val in = new ByteArrayInputStream((line + lineSeparator).getBytes(encode))
        using(in) { in =>
          IOUtils.copyBytes(in, out, buffSize, false)
        }
      }
    }
  }

  /**
   * Uploads a local file to HDFS (the local source is kept).
   */
  def upload(src: Path, dst: Path): Unit = fs.copyFromLocalFile(src, dst)

  /**
   * Downloads an HDFS file to the local filesystem.
   */
  def download(src: Path, dst: Path): Unit = fs.copyToLocalFile(src, dst)
}

object HdfsClient extends Logging {

  val FS_DEFAULTFS = "fs.defaultFS"
  /**
   * Whether appending to existing files is supported by the cluster.
   */
  val DFS_SUPPORT_APPEND = "dfs.support.append"
  /**
   * Set to org.apache.hadoop.hdfs.DistributedFileSystem.
   * Works around: java.io.IOException: Not supported
   */
  val FS_HDFS_IMPL = "fs.hdfs.impl"
  /**
   * When a job runs on the cluster, data nodes calling getFileSystem with an
   * identical Configuration all receive the SAME FileSystem instance, because
   * FileSystem keeps a static CACHE of instances per scheme. If one data node
   * closes that shared connection, the others then fail with:
   * Error: java.io.IOException: Filesystem closed
   * The "fs.%s.impl.disable.cache" key (with %s replaced by the scheme, e.g.
   * hdfs, local, s3, s3n) controls that cache: while caching stays enabled,
   * every get of the same scheme returns the one cached instance.
   */
  val FS_HDFS_IMPL_DISABLE_CACHE = "fs.hdfs.impl.disable.cache"

  /**
   * Builds a client whose FileSystem is created lazily from `conf`.
   * A JVM shutdown hook closes the connection when the process exits.
   */
  def apply(conf: Configuration): HdfsClient =
    new HdfsClient(() => {
      val fileSystem = FileSystem.get(conf)
      sys.addShutdownHook {
        // NOTE(review): `name` is not defined in this object; presumably
        // provided by the Logging trait — confirm.
        info(s"Execute hook thread: $name")
        fileSystem.close()
      }
      fileSystem
    })

  /**
   * Enriches any HdfsClient with the extra operations of [[HdfsIO]].
   */
  implicit def copExtend(client: HdfsClient): HdfsIO = new HdfsIO(client)

  /**
   * Opt-in conversions; `import HdfsClient.implicits._` to enable them.
   */
  object implicits {
    implicit def convertPath(path: String): Path = new Path(path)
  }
}
