package com.ada.flink

import com.ada.flink.bean.StartUpLog
import com.ada.flink.util.MyKafkaUtil
import com.alibaba.fastjson.JSON
import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor
import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011
import org.apache.flink.table.api.scala.{StreamTableEnvironment, _}
import org.apache.flink.table.api.{Table, TableEnvironment}

/**
  * Demonstrates Flink's Table API and SQL on a Kafka-sourced stream.
  *
  * The Table API is a unified relational API for both stream and batch processing: the same
  * query runs unmodified on streaming or batch input. It is a language-integrated API for
  * Scala/Java (a superset of SQL), so queries are written as embedded expressions with full
  * IDE support (auto-completion, syntax checking) rather than as raw SQL strings.
  *
  * Pipeline: Kafka JSON -> StartUpLog beans -> event-time watermarks ->
  *           Table -> 10-second tumbling-window count per channel -> retract stream -> stdout.
  */
object TableApiApp3 {
    def main(args: Array[String]): Unit = {
        // Execution environment (plays the role SparkContext does in Spark).
        val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

        // Switch the job's time semantics from processing time to event time.
        env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)

        val kafkaSource: FlinkKafkaConsumer011[String] = MyKafkaUtil.getConsumer("GMALL_STARTUP")
        val rawStream: DataStream[String] = env.addSource(kafkaSource)

        // Deserialize each JSON record into a StartUpLog bean.
        val startUpLogStream: DataStream[StartUpLog] =
            rawStream.map(json => JSON.parseObject(json, classOf[StartUpLog]))

        // Declare how to extract the event timestamp and generate watermarks.
        // Allowed out-of-orderness is zero, i.e. records are assumed to arrive in order.
        val withEventTime: DataStream[StartUpLog] = startUpLogStream
            .assignTimestampsAndWatermarks(
                new BoundedOutOfOrdernessTimestampExtractor[StartUpLog](Time.seconds(0L)) {
                    override def extractTimestamp(element: StartUpLog): Long = element.ts
                })
            .setParallelism(1)

        // Table environment (plays the role SparkSession does in Spark).
        val tableEnv: StreamTableEnvironment = TableEnvironment.getTableEnvironment(env)

        // Turn the DataStream into a Table; 'ts.rowtime declares ts as the event-time attribute.
        val startupTable: Table = tableEnv.fromDataStream(withEventTime,
            'mid, 'uid, 'appid, 'area, 'os, 'ch, 'logType, 'vs, 'logDate, 'logHour, 'logHourMinute, 'ts.rowtime)

        // Table API variant: count per channel ('ch) over a 10-second tumbling
        // event-time window. Steps: window definition -> groupBy -> select.
        val resultTable: Table = startupTable
            .window(Tumble over 10000.millis on 'ts as 'tt)
            .groupBy('ch, 'tt)
            .select('ch, 'ch.count)

        // SQL variant of the same aggregation. Concatenating the Table into the query
        // string implicitly registers it under its generated name (old Table API idiom).
        val resultSQLTable: Table = tableEnv.sqlQuery("select ch ,count(ch)   from " + startupTable + "  group by ch   ,Tumble(ts,interval '10' SECOND )")

        // Convert the Table back into a retract stream: the Boolean flag marks
        // additions (true) vs retractions (false) of aggregate results.
        val resultDstream: DataStream[(Boolean, (String, Long))] = resultSQLTable.toRetractStream[(String, Long)]

        // Keep only the add messages and print them.
        resultDstream.filter(_._1).print()

        env.execute()
    }
}
