package cn.getech.data.development.sink.sync

import cn.getech.data.development.bean.FlinkStreamSyncHiveObj
import cn.getech.data.development.function.HDFSJsonSyncMapFunction
import cn.getech.data.development.source.{FlinkRealtimeCollectCSVSource, FlinkRealtimeCollectJsonSource}
import cn.getech.data.development.utils.FlinkJDBCAnalysisUtils
import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api.bridge.scala.StreamTableEnvironment
import org.apache.flink.types.Row

/**
  * Realtime Kafka-to-Kudu collection job. Reads JSON or CSV records from
  * Kafka (per the job descriptor `obj`) and writes them to Kudu in batches.
  *
  * @param obj job descriptor carrying the JDBC/Kudu settings, Kafka resource
  *            (topic) and job name used for operator naming.
  */
class FlinkRealtimeCollectKudu(obj: FlinkStreamSyncHiveObj) extends FlinkRealtimeCollectTrait[Row] {

  // Batch size threshold in bytes (128 MB), shared by the HDFS config and
  // both Kudu sinks. Previously duplicated as a magic number in three places.
  private val BatchSizeBytes: Int = 1024 * 1024 * 128

  // HDFS/JDBC analysis configuration derived from the job's JDBC settings;
  // consumed by HDFSJsonSyncMapFunction below.
  val config = FlinkJDBCAnalysisUtils.hdfsConf(obj.jdbc, BatchSizeBytes)

  /**
    * 1. Overwrite operation
    * 2. source
    *
    * Not implemented yet — calling this throws scala.NotImplementedError.
    */
  override def overwriteTable: Unit = ???

  /**
    * JSON sink: consumes JSON records from Kafka, transforms each record with
    * [[HDFSJsonSyncMapFunction]], and writes the results to Kudu in batches.
    *
    * FIX: operator uids must be STABLE across job submissions so Flink can
    * match operator state when restoring from a savepoint. The original code
    * appended `System.currentTimeMillis()` to every uid, which produced a new
    * uid on each submit (silently dropping restored state) and risked a uid
    * collision between source and sink within one run. Deterministic,
    * per-operator uids are used instead.
    *
    * @param env the streaming execution environment the Kafka source attaches to
    */
  def sinkJson(env: StreamExecutionEnvironment): Unit = {
    val uidBase = obj.jobName + "_" + obj.kafkaResource.topic
    val stream = new FlinkRealtimeCollectJsonSource(env, obj).getKafkaDataStream
      .uid(uidBase + "_source")
    // JSON records need a dedicated per-record transformation before sinking.
    val value = stream.map(new HDFSJsonSyncMapFunction(config))
    // NOTE(review): debug output left in place to preserve behavior — consider
    // removing or routing through a logger in production.
    value.print("=========================== ")
    value.addSink(FlinkJDBCAnalysisUtils.batchKuduSink(obj.jdbc, BatchSizeBytes))
      .name(obj.jdbc.sinkTableName)
      .uid(uidBase + "_sink")
  }

  /**
    * CSV/text sink: consumes CSV records from Kafka via the table environment
    * and writes them to Kudu in batches.
    *
    * @param tEnv the stream table environment the CSV source attaches to
    */
  override def sinkCsv(tEnv: StreamTableEnvironment): Unit = {
    val stream = new FlinkRealtimeCollectCSVSource(tEnv, obj).getKafkaDataStream
    stream.addSink(FlinkJDBCAnalysisUtils.batchKuduSink(obj.jdbc, BatchSizeBytes))
      .name(obj.jdbc.sinkTableName)
  }

}
