package com.spark.util.client

import java.io.ByteArrayInputStream
import java.net.URI
import com.spark.util.core.{Borrow, Logging}
import org.apache.hadoop.io.IOUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.{SparkContext, TaskContext}
import org.joda.time.{DateTime, Days}
import org.joda.time.format.DateTimeFormat

class HDFSClient(func:() => FileSystem) extends Borrow with Logging with Serializable {

  // Lazily created so the (non-serializable) FileSystem is only built on first
  // use — e.g. after this client has been shipped to a Spark executor.
  lazy val fs: FileSystem = func()

  /** Creates an empty file at `path`; returns false if it already exists. */
  def createFile(path:String): Boolean = fs.createNewFile(new Path(path))

  /**
   * Deletes the file at `path`.
   *
   * @param boolean when true and `path` is a directory, delete it recursively
   * @return true if the delete succeeded
   */
  def deleteFile(path:String, boolean: Boolean): Boolean = fs.delete(new Path(path), boolean)

  // TODO(review): intended to merge files smaller than 128 MB while keeping
  // larger ones — not implemented yet.
  def mergeFiles(): Unit = {

  }

  /** Appends `content` plus a trailing newline (UTF-8) to the file at `path`. */
  def append(content:String, path:String): Unit = {
    val in = new ByteArrayInputStream((content+"\n").getBytes("UTF-8"))
    val out = fs.append(new Path(path))
    // close = true: copyBytes closes both streams, even when the copy fails.
    IOUtils.copyBytes(in, out, 4096, true)
  }

  /**
   * Appends each line of `content` (newline-terminated, UTF-8) to `path`,
   * reusing one output stream instead of re-opening the file per line.
   */
  def batchAppend(path:String, content:List[String]): Unit = {
    val out = fs.append(new Path(path))
    using(out){ out =>
      content.foreach{ line =>
        val in = new ByteArrayInputStream((line+"\n").getBytes("UTF-8"))
        using(in){in =>
          IOUtils.copyBytes(in, out, 4096, false)
        }
      }
    }
  }

  // TODO(review): stub — meant to decide, based on file size, whether a new
  // file should be created. Both locals are currently computed and discarded.
  def a(schema: String, partitionField: String): Unit = {
    val partitionId = TaskContext.getPartitionId()
    val fileNameMatch = s"$schema/$partitionField"
  }

  /**
   * Builds a comma-separated list of the daily log paths
   * `rootPath/yyyyMMdd.log` that exist on HDFS, covering `interval` days
   * starting at `start` (pattern "yyyyMMdd").
   */
  def getTimePath(sc:SparkContext, rootPath:String, start:String, interval:Int): String = {
    val startTime:DateTime = DateTime.parse(start,DateTimeFormat.forPattern("yyyyMMdd"))
    val endTime: DateTime = startTime.plusDays(interval)
    // Fetch the FileSystem once, not once per candidate path in the filter.
    val fileSystem = FileSystem.get(sc.hadoopConfiguration)
    (0 until Days.daysBetween(startTime, endTime).getDays).map{ x =>
      val secondDire: DateTime = startTime.plusDays(x)
      rootPath+"/"+secondDire.toString("yyyyMMdd")+".log"
    }.filter(candidate => fileSystem.exists(new Path(candidate))).mkString(",")
  }
}

object HDFSClient extends Logging {

  /**
   * Builds a client whose FileSystem is opened lazily from `url`/`conf`;
   * a JVM shutdown hook is registered at open time to close it on exit.
   */
  def apply(url:String,conf:Configuration): HDFSClient = {
    def openFileSystem(): FileSystem = {
      val fileSystem = FileSystem.get(URI.create(url), conf)
      sys.addShutdownHook {
        warn("Execute hook thread: HDFSSink")
        fileSystem.close()
      }
      fileSystem
    }
    new HDFSClient(() => openFileSystem())
  }
}
