package cn.getech.data.development.sink.sync

import cn.getech.data.development.bean.FlinkStreamSyncHiveObj
import cn.getech.data.development.function.{HDFSJsonSyncMapFunction, HDFSParquetStringMapFunction}
import cn.getech.data.development.source.{FlinkRealtimeCollectCSVSource, FlinkRealtimeCollectJsonSource}
import cn.getech.data.development.utils.{FlinkJDBCAnalysisUtils, HDFSSinkUtils, HiveUtils}
import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api.bridge.scala.StreamTableEnvironment
import org.apache.flink.types.Row

class FlinkRealtimeCollectHive(obj: FlinkStreamSyncHiveObj) extends FlinkRealtimeCollectTrait[Row] {

  /** HDFS roll size: 128 MiB per part file (previously a magic number duplicated in each sink). */
  private val RollSizeBytes: Int = 1024 * 1024 * 128

  /**
   * Builds a deterministic operator uid from the job name and a per-operator suffix.
   *
   * NOTE(review): the original code appended `System.currentTimeMillis()` to every uid,
   * which defeats the purpose of `uid()` — Flink requires uids to be stable across job
   * submissions so operator state can be restored from savepoints. A fresh timestamp on
   * each submit means state is silently dropped on every restart.
   *
   * @param suffix a component unique to the operator (e.g. Kafka topic or sink table name)
   * @return a stable uid of the form "jobName_suffix"
   */
  private def operatorUid(suffix: String): String = obj.jobName + "_" + suffix

  /**
   * 1. Overwrite operation.
   * 2. Source preparation: opens the Hive connection for the configured JDBC target.
   */
  override def overwriteTable: Unit = {
    HiveUtils.open(obj.jdbc)
  }

  /**
   * JSON sink: reads the Kafka JSON source, maps each record for HDFS sync, and
   * writes the result to HDFS via the configured sink.
   *
   * @param env the Flink streaming execution environment
   */
  def sinkJson(env: StreamExecutionEnvironment): Unit = {
    val jsonSource = new FlinkRealtimeCollectJsonSource(env, obj)

    // JSON records need a dedicated map step before sinking.
    val config = FlinkJDBCAnalysisUtils.hdfsConf(obj.jdbc, RollSizeBytes)
    val mapped = jsonSource
      .getKafkaDataStream
      .uid(operatorUid(obj.kafkaResource.topic))
      .map(new HDFSJsonSyncMapFunction(config))
    HDFSSinkUtils(config)
      .sink(mapped)
      .name(obj.jdbc.sinkTableName)
      .uid(operatorUid(obj.jdbc.tableName))
  }

  /**
   * Text/CSV sink: reads the Kafka CSV source and writes it to HDFS via the
   * configured sink.
   *
   * @param tEnv the Flink stream table environment
   */
  override def sinkCsv(tEnv: StreamTableEnvironment): Unit = {
    val config = FlinkJDBCAnalysisUtils.hdfsConf(obj.jdbc, RollSizeBytes)
    val stream = new FlinkRealtimeCollectCSVSource(tEnv, obj)
      .getKafkaDataStream
      .uid(operatorUid(obj.kafkaResource.topic))
    HDFSSinkUtils(config)
      .sink(stream)
      .uid(operatorUid(obj.jdbc.tableName))
      .name(obj.jdbc.sinkTableName)
  }

}
