package com.shujia.flink.sql

import org.apache.flink.table.api.{EnvironmentSettings, Table, TableEnvironment}
import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala._

/**
 * Flink SQL streaming demo: reads student records from a Kafka topic,
 * continuously counts students per class, and prints the result.
 */
object Demo2StreamSQL {
  def main(args: Array[String]): Unit = {

    // Environment settings for the Table API.
    val envSettings: EnvironmentSettings = EnvironmentSettings
      .newInstance()
      // Choose the processing mode:
      .inStreamingMode() // stream processing mode
      //.inBatchMode()   // batch processing mode
      .build()

    // Execution environment for Flink SQL.
    val tableEnv: TableEnvironment = TableEnvironment.create(envSettings)

    /**
     * 1. Define a table on top of the stream — a Flink Kafka source table.
     */
    val sourceTableDdl =
      """
        |CREATE TABLE student (
        |  id STRING,
        |  name STRING,
        |  age INT,
        |  sex STRING,
        |  clazz STRING
        |) WITH (
        |  'connector' = 'kafka', -- 数据源为kafka
        |  'topic' = 'students', -- topic
        |  'properties.bootstrap.servers' = 'master:9092,node1:9092,node2:9092', -- KafkA集群列表
        |  'properties.group.id' = 'testGroup',-- 消费者组
        |  'scan.startup.mode' = 'earliest-offset',-- 读取数据的位置
        |  'format' = 'csv' -- 数据格式
        |)
        |""".stripMargin

    /**
     * Create a table used to hold the result — the sink table.
     */
    val sinkTableDdl =
      """
        |CREATE TABLE print_table (
        | clazz STRING,
        | num BIGINT
        |) WITH (
        | 'connector' = 'print' -- 用于在命令行打印结果的连接器
        |)
        |
        |""".stripMargin

    /**
     * 2. Run a continuous query on the dynamic table and write
     *    the per-class counts into the sink table.
     */
    val insertQueryDml =
      """
        |insert into print_table
        |select clazz,count(1) as num from
        |student
        |group by clazz
        |
        |""".stripMargin

    // Register source and sink, then submit the continuous insert job.
    tableEnv.executeSql(sourceTableDdl)
    tableEnv.executeSql(sinkTableDdl)
    tableEnv.executeSql(insertQueryDml)
  }
}
