package com.zt.bigdata.template.hdfs

import java.util.concurrent.TimeUnit

import org.apache.spark.internal.Logging
import org.apache.spark.sql.Dataset
import org.apache.spark.sql.streaming.{OutputMode, StreamingQuery, Trigger}

object HDFSTemplate extends Logging with Serializable {

  /**
    * Persists a streaming Dataset to HDFS as files via Structured Streaming.
    *
    * @param ds                      the source streaming Dataset
    * @param path                    target HDFS directory for the output files
    * @param fileFormat              output file format name, defaults to text (see [[FileFormat]])
    * @param triggerIntervalMilliSec micro-batch trigger interval in milliseconds, defaults to 5000
    * @param outputMode              streaming output mode, defaults to Append
    *                                (NOTE(review): Spark's file sink only supports Append — confirm
    *                                other modes are intentional before passing them)
    * @param checkpointLocation      optional checkpoint directory; when None, behavior is unchanged
    *                                from before and Spark falls back to the session-level
    *                                spark.sql.streaming.checkpointLocation setting
    * @return the started [[StreamingQuery]], so callers can awaitTermination() or stop() it
    *         (previously discarded, which made the query unmanageable)
    */
  def sinkHDFSStream(ds: Dataset[_], path: String, fileFormat: String = FileFormat.TEXT, triggerIntervalMilliSec: Long = 5000,
                     outputMode: OutputMode = OutputMode.Append,
                     checkpointLocation: Option[String] = None): StreamingQuery = {
    val writer = ds.writeStream
      .format(fileFormat)
      .option("path", path)
      .outputMode(outputMode)
      .trigger(Trigger.ProcessingTime(triggerIntervalMilliSec, TimeUnit.MILLISECONDS))
    // Only set the checkpoint option when explicitly provided, preserving the
    // original fallback to the session-wide configuration.
    checkpointLocation.foreach(cp => writer.option("checkpointLocation", cp))
    writer.start()
  }

  /**
    * Supported sink format names as plain string constants.
    *
    * Replaces the previous scala.Enumeration + `.toString` indirection (Enumeration is a
    * discouraged construct, and it was only being used to produce Strings anyway).
    * The public members keep the exact same names, types (String) and values, so all
    * existing callers are unaffected.
    */
  object FileFormat {
    val TEXT: String = "text"
    val CSV: String = "csv"
    val HIVE: String = "hive"
    val JSON: String = "json"
    val ORC: String = "orc"
    val PARQUET: String = "parquet"
    val LIBSVM: String = "libsvm"
  }

}
