package com.shujia.flink.sql

import org.apache.flink.table.api.{EnvironmentSettings, TableEnvironment}

object Demo2ClazzNumOnStream {

  /**
   * Streaming Flink SQL job: consumes student records from a Kafka topic,
   * maintains a continuous per-class count, and prints the updating result
   * to the console via the `print` connector.
   */
  def main(args: Array[String]): Unit = {

    // 1. Build the Flink SQL environment in streaming mode
    //    (batch mode is available via inBatchMode()).
    val settings: EnvironmentSettings = EnvironmentSettings
      .newInstance()
      .inStreamingMode() // streaming mode
      //.inBatchMode()   // batch mode
      .build()

    val tableEnv: TableEnvironment = TableEnvironment.create(settings)

    // 2. Source table: a dynamic table defined over the Kafka `student` topic,
    //    parsed as CSV and mapped positionally onto the declared columns.
    val createKafkaSourceDdl =
      """
        |CREATE TABLE kafka_student (
        |  `id` STRING,
        |  `name` STRING,
        |  `age` int,
        |  `gender` STRING,
        |  `clazz` STRING
        |) WITH (
        |  'connector' = 'kafka', -- 指定链接数据的类型
        |  'topic' = 'student', -- 指定读取kafka的topic
        |  'properties.bootstrap.servers' = 'master:9092,node1:9092,node2:9092', -- kafka broker列表
        |  'properties.group.id' = 'testGroup', -- 消费者组
        |  'scan.startup.mode' = 'earliest-offset', -- 读取数据的位置,earliest-offset:读取所有数据
        |  'format' = 'csv' -- 读取数据的格式,csv默认时逗号分隔,会自动解析数据,会安装数据的顺序和上面的字段映射
        |)
        |""".stripMargin

    // 3. Sink table: `print_table` writes every result row to stdout.
    val createPrintSinkDdl =
      """
        |CREATE TABLE print_table (
        | clazz STRING,
        | num BIGINT
        |) WITH (
        | 'connector' = 'print'
        |)
        |""".stripMargin

    // 4. Continuous query: group by class and count rows; the aggregate is
    //    re-emitted to the sink as new records arrive on the source.
    val insertClazzCounts =
      """
        |insert into print_table
        |select clazz,count(1) as num from
        |kafka_student
        |group by clazz
        |
        |""".stripMargin

    tableEnv.executeSql(createKafkaSourceDdl)
    tableEnv.executeSql(createPrintSinkDdl)
    tableEnv.executeSql(insertClazzCounts)

  }

}
