package cn.bawei.shop.realtime.etl.`trait`
import cn.bawei.canal.bean.RowData
import cn.bawei.shop.realtime.etl.utils.{CanalRowDataDeserialzerSchema, GlobalConfigUtil, KafkaProps}
import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011
import org.apache.flink.streaming.api.scala._

/**
 * Base class for ETL jobs that process MySQL binlog change events.
 *
 * Canal captures MySQL binlog rows, serializes them with protobuf, and
 * publishes them to Kafka; subclasses get a ready-made `DataStream[RowData]`
 * by calling [[getKafkaDataSream]].
 *
 * @param env the Flink streaming execution environment sources are attached to
 */
abstract class MysqlBaseETL(env: StreamExecutionEnvironment) extends BaseETL[RowData] {
  /**
   * Consumes the Canal binlog topic from Kafka and returns the decoded stream.
   *
   * The Kafka messages are protobuf-encoded Canal records, so a custom
   * deserialization schema is required to turn each message into a `RowData`.
   *
   * TODO(review): the name has a typo ("Sream" -> "Stream"), but it overrides
   * `BaseETL`, so it must be fixed in the trait and all implementors together.
   *
   * @param topic Kafka topic to consume; defaults to the Canal input topic
   *              from the global configuration
   * @return stream of deserialized binlog rows
   */
  override def getKafkaDataSream(topic: String = GlobalConfigUtil.`input.topic.canal`): DataStream[RowData] = {
    // Custom schema decodes the protobuf-encoded Canal payload into RowData.
    val canalKafkaConsumer: FlinkKafkaConsumer011[RowData] = new FlinkKafkaConsumer011[RowData](
      topic,
      new CanalRowDataDeserialzerSchema(),
      KafkaProps.getKafkaProperties()
    )

    // Attach the consumer as a source and return the resulting stream.
    // FIX: removed the leftover debug `canalDataStream.print()` — it added a
    // per-record stdout sink to every subclass's job graph, which is both
    // noisy and costly in production. Subclasses that need inspection can
    // call `.print()` on the returned stream themselves.
    env.addSource(canalKafkaConsumer)
  }

}
