package streamLogStatic.handle

import java.util.Properties

import org.apache.flink.api.java.utils.ParameterTool
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment
import org.apache.flink.streaming.connectors.kafka.{FlinkKafkaConsumer010, Kafka010TableSource}
import org.apache.flink.table.api.java.StreamTableEnvironment
import streamLogStatic.pojos.{weblog, weblogDesc}
import org.apache.flink.table.api.Table

/**
  *
  * @ObjectName kafkatoHbaseHandle
  * @Description Consumes weblog events from a Kafka source and registers them as a Flink table.
  *              Intended to write the data to HBase, but no HBase sink is implemented yet —
  *              only a commented-out CSV sink exists in the body.
  * @Author dalong
  * @Date 2020/6/15 22:01
  * @Version 1.0
  **/

object kafkatoHbaseHandle {

  /**
    * Job entry point.
    *
    * Reads `weblog` records from the Kafka topic "click_events" (starting from the
    * earliest offset), exposes the stream as table "weblogTbl", and issues a
    * pass-through SQL query over it. The CSV sink is currently commented out, so
    * the query result is not written anywhere.
    *
    * @param args command-line arguments; supports `--isdebug true|false` (default "false")
    */
  def main(args: Array[String]): Unit = {

    val params = ParameterTool.fromArgs(args)

    /* Debug-mode flag, default false; enable with `--isdebug true`.
       NOTE(review): currently unused downstream — confirm it is meant to gate
       behavior (e.g. local sink vs. HBase) before removing. */
    val isDebug = params.get("isdebug", "false").toBoolean

    val env = StreamExecutionEnvironment.getExecutionEnvironment()

    /* Table environment layered on top of the stream environment. */
    val tableEnv = StreamTableEnvironment.create(env)

    /* Kafka consumer configuration. */
    val properties = new Properties()
    properties.setProperty("bootstrap.servers", "localhost:9092")
    properties.setProperty("group.id", "weblog")

    /* Kafka source using the custom deserialization schema `weblogDesc`
       to parse raw records into `weblog` POJOs. */
    val kafkaConsumer = new FlinkKafkaConsumer010[weblog]("click_events", new weblogDesc, properties)
    kafkaConsumer.setStartFromEarliest()
    val kafkaSource = env.addSource(kafkaConsumer)

    /* Bridge the DataStream into the Table API. */
    val kafkaTbl = tableEnv.fromDataStream(kafkaSource)

    /* Register the table so it can be referenced by name in SQL. */
    tableEnv.registerTable("weblogTbl", kafkaTbl)

    /* Pass-through query; result is unused while the sink below is disabled. */
    val streamTbl: Table = tableEnv.sqlQuery("select * from weblogTbl")

    //streamTbl.writeToSink(new CsvTableSink("/Users/dalong/Downloads/temp/weblog",",",1,WriteMode.NO_OVERWRITE))

    /* NOTE(review): with every sink commented out, Flink may reject this job at
       execute() ("no operators defined") — confirm a sink is attached before deploy. */
    env.execute("kafkaweblogJob")

  }
}
