package com.shujia.flink.sql

import org.apache.flink.table.api.{EnvironmentSettings, TableEnvironment}

object Demo4KafkaSource {

  /**
   * Flink SQL demo: consumes CSV-encoded student records from the Kafka
   * topic `student`, counts students per class, and emits the running
   * counts to a `print` sink.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    // Unbounded (streaming) execution — the Kafka source never terminates,
    // so the per-class counts are continuously updated.
    val settings: EnvironmentSettings = EnvironmentSettings
      .newInstance()
      .inStreamingMode()
      .build()

    // Pure Table API / SQL environment (no DataStream interop needed here).
    val tableEnv: TableEnvironment = TableEnvironment.create(settings)

    /**
     * Kafka source table: CSV rows read from the earliest offset.
     */
    tableEnv.executeSql(
      """
        |
        |CREATE TABLE student_kafka (
        |    id STRING,
        |    name STRING,
        |    age INT,
        |    gender STRING,
        |    clazz STRING
        |) WITH (
        |  'connector' = 'kafka',
        |  'topic' = 'student',
        |  'properties.bootstrap.servers' = 'master:9092,node1:9092,node2:9092',
        |  'properties.group.id' = 'testGroup',
        |  'scan.startup.mode' = 'earliest-offset',
        |  'format' = 'csv',
        |  'csv.field-delimiter'=',', -- csv格式数据的分隔符
        |  'csv.ignore-parse-errors'='true', -- 如果出现脏数据据,补null
        |  'csv.allow-comments'='true'--跳过
        |)
        |
        |
        |""".stripMargin)

    /**
     * Print sink table: writes each (clazz, num) update to stdout.
     */
    tableEnv.executeSql(
      """
        |CREATE TABLE print_table
        |(
        |clazz STRING,
        |num BIGINT
        |)
        |WITH ('connector' = 'print')
        |
        |""".stripMargin)

    // `executeSql` submits the INSERT job asynchronously and returns a
    // TableResult immediately. Block on await() so the client does not fall
    // off the end of main while the streaming job is running (important for
    // local/IDE execution of this demo).
    tableEnv
      .executeSql(
        """
          |insert into print_table
          |select clazz,count(1) as num from
          |student_kafka
          |where clazz is not null
          |group by clazz
          |
          |""".stripMargin)
      .await()
  }

}
