package com.haoxin.stream

import com.haoxin.stream.base.FlinkTableEnv
import org.apache.flink.api.common.eventtime.WatermarkStrategy
import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.api.java.utils.ParameterTool
import org.apache.flink.connector.kafka.source.KafkaSource
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer
import org.apache.flink.streaming.api.scala._

import java.util.Properties

/**
 * Flink streaming ETL job that reads raw SDK log lines from Kafka and,
 * for now, prints them to stdout (placeholder for the real transformation).
 *
 * Connection settings are taken from the command-line `ParameterTool`
 * (`--bootstrap.servers`, `--group.id`, `--topic`) and fall back to the
 * previous hard-coded values, so existing invocations behave identically.
 */
class LogEtl extends FlinkTableEnv {

  override protected def execute(args: Array[String]): Unit = {
    val conf = ParameterTool.fromArgs(args)
    // Expose job parameters to runtime operators via the global job config.
    env.getConfig.setGlobalJobParameters(conf)

    // Kafka consumer settings; defaults preserve the original hard-coded values.
    val properties = new Properties()
    properties.setProperty("bootstrap.servers", conf.get("bootstrap.servers", "localhost:9092"))
    properties.setProperty("group.id", conf.get("group.id", "test"))
    // Enable auto-commit of offsets to Kafka.
    // NOTE(review): with the new KafkaSource, offsets are also committed on
    // checkpoints; auto-commit here is kept for behavior compatibility — confirm intent.
    properties.setProperty("enable.auto.commit", "true")
    properties.setProperty("auto.commit.interval.ms", "1000") // auto-commit interval

    val kafkaSource = KafkaSource.builder[String]
      .setProperties(properties)
      .setTopics(conf.get("topic", "rtc_sdk_stream_logs"))
      .setStartingOffsets(OffsetsInitializer.earliest())
      .setValueOnlyDeserializer(new SimpleStringSchema())
      .build()

    // Event time is not used downstream, so no watermarks are generated.
    val stream: DataStream[String] = env
      .fromSource(kafkaSource, WatermarkStrategy.noWatermarks(), "Kafka Source")

    // Placeholder processing: just print each record.
    stream.print()

    // Give the job a meaningful name instead of the empty string, so it is
    // identifiable in the Flink dashboard and logs.
    env.execute("LogEtl")
  }
}

object LogEtl {
  /** Entry point: instantiates the job and hands control to [[LogEtl]]'s run loop. */
  def main(args: Array[String]): Unit = {
    val job = new LogEtl()
    job.run(args)
  }
}
