package com.ywps.vaas.framework.util

import com.ywps.vaas.framework.conf.ConfigTools
import com.ywps.vaas.framework.constant.{PropertiesConstant, StanderConstant}
import org.apache.spark.sql.{Dataset, SaveMode}

import java.util.Properties

object HdfsUtil {

  val properties: Properties = ConfigTools.getPropertiesFactory()

  /** Base HDFS output directory; every `path` argument below is appended to it. */
  val basePath: String = properties.getProperty(PropertiesConstant.SPARK_SQL_WRITE_EXTERNAL)

  /**
   * Writes a Dataset to HDFS as CSV under `basePath + path`.
   *
   * @param ds            dataset to write
   * @param path          save path or directory, relative to `basePath`
   * @param saveMode      save mode, e.g. "append" or "overwrite"
   * @param isCompression whether to enable bzip2 compression
   * @tparam T element type of the dataset
   */
  def writeHdfs[T](ds: Dataset[T], path: String, saveMode: String, isCompression: Boolean): Unit = {
    // Build the writer once and add the compression option only when requested,
    // instead of duplicating the whole pipeline in a Boolean match.
    val writer = ds.write.mode(saveMode)
    val configured =
      if (isCompression) writer.option(StanderConstant.COMPRESSION, StanderConstant.COMPRESSION_TYPE_BZIP2)
      else writer
    configured.format(StanderConstant.COMPRESSION_FILE_TYPE_CSV).save(basePath + path)
  }

  /**
   * Writes a Dataset to HDFS as bzip2-compressed CSV under `basePath + path`.
   *
   * @param ds       dataset to write
   * @param path     save path or directory, relative to `basePath`
   * @param saveMode save mode, e.g. "append" or "overwrite"
   * @tparam T element type of the dataset
   */
  def writeHdfs[T](ds: Dataset[T], path: String, saveMode: String): Unit =
    // Delegate to the full overload; this variant always compresses.
    writeHdfs(ds, path, saveMode, isCompression = true)

  /**
   * Writes a Dataset to HDFS as bzip2-compressed CSV under `basePath + path`,
   * appending to any existing data.
   *
   * @param ds   dataset to write
   * @param path save path or directory, relative to `basePath`
   * @tparam T element type of the dataset
   */
  def writeHdfs[T](ds: Dataset[T], path: String): Unit =
    // Kept as a direct call so the typed SaveMode.Append overload of mode() is used.
    ds.write
      .mode(SaveMode.Append)
      .option(StanderConstant.COMPRESSION, StanderConstant.COMPRESSION_TYPE_BZIP2)
      .format(StanderConstant.COMPRESSION_FILE_TYPE_CSV)
      .save(basePath + path)
}
