package cn.getech.data.development.sink.hdfs

import java.lang

import cn.getech.data.development.sink.jdbc.config.CustomTableConfig
import cn.getech.data.development.utils.HDFSSinkUtils
import org.apache.flink.api.common.functions.FilterFunction
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.tuple
import org.apache.flink.api.java.typeutils.RowTypeInfo
import org.apache.flink.core.fs.Path
import org.apache.flink.streaming.api.datastream.{DataStream, DataStreamSink}
import org.apache.flink.table.api.TableSchema
import org.apache.flink.table.sinks.{RetractStreamTableSink, TableSink}
import org.apache.flink.table.utils.TableConnectorUtils
import org.apache.flink.types.Row

/**
 * Retract-stream table sink that writes rows to HDFS.
 *
 * Each incoming element is a (flag, row) pair; flag == true marks an
 * accumulate (insert) message and flag == false a retraction. Retractions
 * cannot be applied to append-only HDFS files, so only flag == true records
 * are written.
 *
 * @param conf sink configuration providing field names, field data types and
 *             the target HDFS URI
 */
@deprecated
class FlinkStreamHDFSRetractSink(conf: CustomTableConfig) extends RetractStreamTableSink[Row] {
  // Table schema built from the configured field names and data types.
  private val tableSchema: TableSchema =
    TableSchema.builder().fields(conf.param_field_names, conf.param_class_type2DataType).build()

  // Target HDFS path derived from the configuration.
  val path = new Path(conf.getHDFSUri)

  /**
   * Consumes the retract stream and writes the accumulate records to HDFS.
   *
   * @param dataStream retract stream of (isAccumulate, row) pairs
   * @return the data-stream sink, with parallelism and runtime name set
   */
  override def consumeDataStream(dataStream: DataStream[tuple.Tuple2[lang.Boolean, Row]]): DataStreamSink[_] = {
    // Retract filtering: keep only accumulate (f0 == true) records, since
    // retractions are not representable in append-only files.
    val insertsOnly = dataStream.filter(new FilterFunction[tuple.Tuple2[lang.Boolean, Row]] {
      override def filter(t: tuple.Tuple2[lang.Boolean, Row]): Boolean = t.f0
    })
    HDFSSinkUtils(conf)
      .sink(insertsOnly)
      .setParallelism(dataStream.getParallelism)
      .name(TableConnectorUtils.generateRuntimeName(this.getClass, getFieldNames))
  }

  /** Type information for the emitted rows, derived from the table schema. */
  override def getRecordType: TypeInformation[Row] =
    new RowTypeInfo(tableSchema.getFieldTypes(), tableSchema.getFieldNames())

  /**
   * Not supported: the schema is fixed by the constructor configuration.
   * Throws the same `NotImplementedError` as the former `???`, but with a
   * descriptive message instead of none.
   */
  override def configure(strings: Array[String], typeInformations: Array[TypeInformation[_]]): TableSink[tuple.Tuple2[lang.Boolean, Row]] =
    throw new NotImplementedError(
      "FlinkStreamHDFSRetractSink.configure is not supported; the schema is fixed by CustomTableConfig")

  override def getTableSchema: TableSchema = tableSchema
}
