package cn.getech.data.development.utils

import java.lang

import cn.getech.data.development.bean.jdbc.JDBCSqlParserResourceBean
import cn.getech.data.development.enums.CustomJDBCType
import cn.getech.data.development.sink.hdfs.FlinkStreamHDFSRetractSink
import cn.getech.data.development.sink.jdbc.config.CustomTableConfig
import cn.getech.data.development.sink.kudu.{FlinkStreamKuduRetractSink, FlinkStreamKuduSink, FlinkStreamKuduUpsertSink}
import org.apache.flink.api.java.tuple
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction
import org.apache.flink.table.api.TableSchema
import org.apache.flink.table.connector.sink.DynamicTableSink
import org.apache.flink.table.sinks.TableSink
import org.apache.flink.types.Row

object FlinkJDBCAnalysisUtils {

  /**
   * Resolve the retract-style [[TableSink]] matching the JDBC resource's target type.
   *
   * Currently only kudu, hive and hdfs targets have a retract sink mapping; any
   * other type fails fast with a descriptive error instead of an opaque
   * `MatchError`. Note that `CustomJDBCType.withName` itself throws
   * `NoSuchElementException` when `jdbc.typename` is not a declared enum value.
   *
   * @param jdbc      parsed JDBC resource bean describing the sink target
   * @param batchSize flush/batch threshold handed to the sink configuration
   *                  (default 64 MiB)
   * @return a retract sink emitting (changeFlag, row) tuples
   * @throws IllegalArgumentException when the resource type has no retract sink mapping
   */
  def operatorUpsertSink(jdbc: JDBCSqlParserResourceBean, batchSize: Int = 1024 * 1024 * 64): TableSink[tuple.Tuple2[lang.Boolean, Row]] = {
    CustomJDBCType.withName(jdbc.typename) match {
      case CustomJDBCType.kudu =>
        new FlinkStreamKuduRetractSink(kuduConf(jdbc, batchSize))
      case CustomJDBCType.hive | CustomJDBCType.hdfs =>
        new FlinkStreamHDFSRetractSink(hdfsConf(jdbc, batchSize))
      case unsupported =>
        // Previously a silent non-exhaustive match: e.g. impala/clickHouse fell
        // through to a bare MatchError. Fail fast with an explicit message instead.
        throw new IllegalArgumentException(s"No retract sink mapping for JDBC type: $unsupported")
    }
  }

  /**
   * Build an append-style Kudu sink for real-time collection.
   *
   * @param jdbc      parsed JDBC resource bean describing the Kudu target
   * @param batchSize batch size forwarded to the sink configuration
   * @return a [[RichSinkFunction]] writing plain rows to Kudu
   */
  def batchKuduSink(jdbc: JDBCSqlParserResourceBean, batchSize: Int): RichSinkFunction[Row] = {
    new FlinkStreamKuduSink(kuduConf(jdbc, batchSize))
  }

  /**
   * Build an upsert-style Kudu sink operating on (changeFlag, row) tuples.
   *
   * @param jdbc      parsed JDBC resource bean describing the Kudu target
   * @param batchSize batch size forwarded to the sink configuration
   * @return a [[RichSinkFunction]] applying upserts/retractions to Kudu
   */
  def batchKuduUpsertSink(jdbc: JDBCSqlParserResourceBean, batchSize: Int): RichSinkFunction[tuple.Tuple2[lang.Boolean, Row]] = {
    new FlinkStreamKuduUpsertSink(kuduConf(jdbc, batchSize))
  }

  /**
   * Assemble the [[CustomTableConfig]] required by the Kudu sinks
   * (table identity, connection URL, field list and batch interval).
   *
   * @param jdbc      parsed JDBC resource bean describing the Kudu target
   * @param batchSize value used as the sink's batch interval
   * @return the populated table configuration
   */
  def kuduConf(jdbc: JDBCSqlParserResourceBean, batchSize: Int): CustomTableConfig = {
    new CustomTableConfig()
      .sqlParams(jdbc.fields)
      .tableName(jdbc.tableName)
      .db(jdbc.db)
      .url(jdbc.url)
      .batchInterval(batchSize)
      .jdbcType(jdbc.typename)
  }

  /**
   * Assemble the [[CustomTableConfig]] for the HDFS/Hive sink used by
   * real-time collection, including partitioning and output-format settings.
   *
   * @param jdbc      parsed JDBC resource bean describing the HDFS/Hive target
   * @param batchSize value used as the sink's batch interval
   * @return the populated table configuration
   */
  def hdfsConf(jdbc: JDBCSqlParserResourceBean, batchSize: Int): CustomTableConfig = {
    new CustomTableConfig()
      .tableName(jdbc.tableName)
      .sqlParams(jdbc.fields)
      .db(jdbc.db)
      .partitionTable(jdbc.isPartitionTable)
      .dynamicPartition(jdbc.isDynamicPartition)
      .collectionConfig(jdbc.collectionConfig)
      .batchInterval(batchSize)
      .jdbcType(jdbc.typename)
      .hdfs_delimitFormat(jdbc.delimitFormat)
      .hdfs_format(jdbc.outputFormat)
  }

  /**
   * Assemble the [[CustomTableConfig]] for a generic JDBC sink, including
   * credentials and partitioning settings.
   *
   * @param jdbc      parsed JDBC resource bean describing the JDBC target
   * @param batchSize value used as the sink's batch interval (default 0)
   * @return the populated table configuration
   */
  def jdbcConf(jdbc: JDBCSqlParserResourceBean, batchSize: Int = 0): CustomTableConfig = {
    new CustomTableConfig()
      .sqlParams(jdbc.fields)
      .tableName(jdbc.tableName)
      .db(jdbc.db)
      .url(jdbc.url)
      .username(jdbc.username)
      .password(jdbc.password)
      .partitionTable(jdbc.isPartitionTable)
      .dynamicPartition(jdbc.isDynamicPartition)
      .collectionConfig(jdbc.collectionConfig)
      .batchInterval(batchSize)
      .jdbcType(jdbc.typename)
  }

}
