package com.ada.flink

import com.ada.flink.bean.StartUpLog
import com.ada.flink.util.MyKafkaUtil
import com.alibaba.fastjson.JSON
import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011
import org.apache.flink.table.api.scala.{StreamTableEnvironment, _}
import org.apache.flink.table.api.{Table, TableEnvironment}

/**
  * Demo of Flink's Table API: a unified relational API for both stream and
  * batch processing — the same query runs on either input without changes.
  * Unlike plain SQL strings, Table API queries are embedded in Scala/Java,
  * so the IDE provides auto-completion and syntax checking.
  */
object TableApiApp {
    def main(args: Array[String]): Unit = {
        // Streaming execution environment for this job.
        val streamEnv: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

        // Consume raw JSON startup-log records from the Kafka topic.
        val kafkaSource: FlinkKafkaConsumer011[String] = MyKafkaUtil.getConsumer("GMALL_STARTUP")
        val rawStream: DataStream[String] = streamEnv.addSource(kafkaSource)

        // Table environment that bridges the DataStream API and the Table API.
        val tblEnv: StreamTableEnvironment = TableEnvironment.getTableEnvironment(streamEnv)

        // Deserialize each JSON string into a StartUpLog case class.
        val startLogStream: DataStream[StartUpLog] =
            rawStream.map(record => JSON.parseObject(record, classOf[StartUpLog]))

        /*
        Dynamic table:
        because the stream's element type is a case class, the table schema
        is derived directly from the case class structure.
         */
        val startLogTable: Table = tblEnv.fromDataStream(startLogStream)

        // Keep only (mid, ch) columns for records whose channel is "appstore".
        val filtered: Table = startLogTable.select("mid,ch").filter("ch ='appstore'")

        // Convert the append-only table back into a typed DataStream and print it.
        val midChStream: DataStream[(String, String)] = filtered.toAppendStream[(String, String)]
        midChStream.print()

        streamEnv.execute()
    }
}
