package cn.getech.data.development.utils

import java.lang

import cn.getech.data.development.enums.OutputFormat
import cn.getech.data.development.function._
import cn.getech.data.development.sink.jdbc.config.CustomTableConfig
import org.apache.flink.api.common.functions.MapFunction
import org.apache.flink.api.java.tuple
import org.apache.flink.core.fs.Path
import org.apache.flink.formats.parquet.avro.ParquetAvroWriters
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator
import org.apache.flink.streaming.api.functions.sink.filesystem.{OutputFileConfig, StreamingFileSink}
import org.apache.flink.streaming.api.scala.{DataStream, _}
import org.apache.flink.types.Row
import org.apache.flink.util.Preconditions
import org.slf4j.{Logger, LoggerFactory}

/**
 * hdfs sink
 */
/**
 * HDFS sink helpers.
 *
 * Builds Flink [[StreamingFileSink]]s that write json / text (csv) / parquet
 * files to the HDFS path taken from a [[CustomTableConfig]].
 *
 * Stateful singleton: [[apply]] must be called with the table config before
 * any `sink`/`*Sink` method, because the config and target path live in this
 * object. NOTE(review): this means one JVM-wide config — safe only if a
 * single table config is used per job; confirm against callers.
 */
object HDFSSinkUtils {
  private var conf: CustomTableConfig = new CustomTableConfig
  private var path: Path = _
  private val logger: Logger = LoggerFactory.getLogger(this.getClass)

  /**
   * Initializes the utilities: stores the config and resolves the HDFS target
   * path from `config.getHDFSUri`.
   *
   * @param config sink configuration carrying the HDFS URI and output format
   * @return this object, enabling `HDFSSinkUtils(config).sink(ds)` chaining
   */
  def apply(config: CustomTableConfig): HDFSSinkUtils.type = {
    conf = config
    path = new Path(conf.getHDFSUri)
    logger.info(s"insert path: $path")
    this
  }

  /** Shared part-file naming: prefix every part file with the current epoch millis. */
  private def partFileConfig: OutputFileConfig =
    OutputFileConfig
      .builder()
      .withPartPrefix(System.currentTimeMillis().toString)
      .build()

  /** Drops the retract flag of an upsert stream, keeping only the row payload (f1). */
  private def dropRetractFlag(
      ds: SingleOutputStreamOperator[tuple.Tuple2[lang.Boolean, Row]]): SingleOutputStreamOperator[Row] =
    ds.map(new MapFunction[tuple.Tuple2[lang.Boolean, Row], Row] {
      override def map(value: tuple.Tuple2[lang.Boolean, Row]): Row = value.f1
    })

  /**
   * Sinks a retract (upsert) stream in the configured format. The Boolean
   * retract flag is discarded, so retractions are NOT applied on HDFS —
   * deleted/updated rows are simply appended again.
   *
   * @param ds stream of (retractFlag, row) tuples
   */
  @deprecated("retract flags are dropped on write; use sink(ds: DataStream[Row]) on an append-only stream", "")
  def sink(ds: SingleOutputStreamOperator[tuple.Tuple2[lang.Boolean, Row]]) = {
    // `conf` has a non-null default, so `path` is the field that is actually
    // null until apply() runs — check it explicitly with a clear message.
    Preconditions.checkNotNull(conf)
    Preconditions.checkNotNull(path, "HDFSSinkUtils(config) must be called before sink")
    conf.format match {
      case OutputFormat.json    => jsonSink(ds)
      case OutputFormat.text    => textSink(ds)
      case OutputFormat.parquet => parquetSink(ds)
      case other                => throw new IllegalArgumentException(s"unsupported output format: $other")
    }
  }

  /**
   * Sinks a non-retract (append-only) stream of rows in the configured format.
   *
   * @param ds append-only row stream
   */
  def sink(ds: DataStream[Row]) = {
    Preconditions.checkNotNull(conf)
    Preconditions.checkNotNull(path, "HDFSSinkUtils(config) must be called before sink")
    conf.format match {
      case OutputFormat.json    => ds.addSink(jsonSink)
      case OutputFormat.text    => ds.addSink(textSink)
      case OutputFormat.parquet => ds.map(new HDFSParquetStringMapFunction(conf)).addSink(parquetSink)
      case other                => throw new IllegalArgumentException(s"unsupported output format: $other")
    }
  }

  /**
   * json bulk sink for a retract stream (retract flag dropped before writing).
   *
   * @param ds stream of (retractFlag, row) tuples
   */
  def jsonSink(ds: SingleOutputStreamOperator[tuple.Tuple2[lang.Boolean, Row]]) =
    dropRetractFlag(ds).addSink(jsonSink)

  /** json bulk sink for an append-only stream. */
  def jsonSink = {
    StreamingFileSink.forBulkFormat[Row](path, new HDFSBulkFormatJsonFactory(conf))
      .withBucketAssigner(new HDFSBulkCSVAssigner(conf))
      .withOutputFileConfig(partFileConfig)
      .build()
  }

  /**
   * text (csv) bulk sink for a retract stream (retract flag dropped before writing).
   *
   * @param ds stream of (retractFlag, row) tuples
   */
  def textSink(ds: SingleOutputStreamOperator[tuple.Tuple2[lang.Boolean, Row]]) =
    dropRetractFlag(ds).addSink(textSink)

  /** text (csv) bulk sink for an append-only stream. */
  def textSink = {
    StreamingFileSink.forBulkFormat[Row](path, new HDFSBulkFormatCSVFactory(conf))
      .withBucketAssigner(new HDFSBulkCSVAssigner(conf))
      .withOutputFileConfig(partFileConfig)
      .build()
  }

  /**
   * parquet bulk sink for a retract stream: tuples are converted to records
   * accepted by the parquet writer via [[HDFSParquetMapFunction]].
   *
   * @param ds stream of (retractFlag, row) tuples
   */
  def parquetSink(ds: SingleOutputStreamOperator[tuple.Tuple2[lang.Boolean, Row]]) =
    ds.map(new HDFSParquetMapFunction(conf)).addSink(parquetBuilder)

  /** parquet bulk sink for an append-only (already-mapped) stream. */
  def parquetSink = {
    parquetBuilder
  }

  /**
   * Builds the parquet [[StreamingFileSink]]. The Avro schema is derived via
   * `AVSCFileUtils.schema(conf.partitionParam(false))` — presumably the
   * non-partition columns; verify against AVSCFileUtils.
   */
  def parquetBuilder = {
    StreamingFileSink
      .forBulkFormat(path, ParquetAvroWriters.forGenericRecord(AVSCFileUtils.schema(conf.partitionParam(false))))
      .withBucketAssigner(new HDFSBulkParquetAssigner(conf))
      .withOutputFileConfig(partFileConfig)
      .build()
  }
}
