package com.gitee.dufafei.spark.connector.hdfs

import org.apache.spark.TaskContext

import scala.collection.mutable.ArrayBuffer

class HdfsIO(client: HdfsClient) {

  import HdfsClient.implicits._

  /**
   * Returns the path of a file under `path` that the current Spark partition
   * should append to, creating a new file once the current one has grown past
   * `threshold` bytes. Appending to an existing file instead of always creating
   * one reduces the number of small files produced per Spark partition.
   *
   * File names follow the pattern "&lt;partitionId&gt;-&lt;n&gt;" with n starting at 1
   * (leading zeros are not produced and not matched).
   *
   * @param path      directory to write into (with or without a trailing "/")
   * @param threshold size in bytes above which a fresh file is started
   *                  (default 128 MiB, matching the common HDFS block size)
   * @return full path of the file the caller should append to
   */
  def getPartitionFile(path: String, threshold: Long = 128 * 1024 * 1024L): String = {
    val catalog = if (path.endsWith("/")) path else path.concat("/")
    val prefix = TaskContext.getPartitionId() + "-"
    // Hoisted out of the loop: the original compiled this regex once per file.
    // Scala regex pattern matching requires the whole name to match, so files
    // belonging to other partitions (e.g. "13-5" vs prefix "3-") are excluded.
    val partitionFilePattern = (prefix + "([1-9]\\d*$)").r

    // Collect the numeric suffixes of files this partition already created.
    val list = client.fs.listFiles(catalog, false)
    val index = new ArrayBuffer[Int]()
    while (list.hasNext) {
      list.next().getPath.getName match {
        case partitionFilePattern(num) => index.append(num.toInt)
        case _ => // not one of this partition's files; ignore
      }
    }

    val fillPath = catalog + prefix
    if (index.isEmpty) {
      // No file for this partition yet: create "<partitionId>-1".
      val fullPath = fillPath + 1
      client.fs.createNewFile(fullPath)
      fullPath
    } else {
      // index.max computed once (the original scanned the buffer twice).
      val maxIndex = index.max
      val currentPath = fillPath + maxIndex
      val status = client.fs.getFileStatus(currentPath)
      if (status.getLen > threshold) {
        // Current file reached the threshold: roll over to the next index.
        val nextPath = fillPath + (maxIndex + 1)
        client.fs.createNewFile(nextPath)
        nextPath
      } else {
        currentPath
      }
    }
  }
}
