package com.shujia.flink.sql

import org.apache.flink.table.api.{EnvironmentSettings, TableEnvironment}

object Demo6ClazzNum {

  /**
   * Streaming demo: continuously count students per class from a Kafka
   * topic (`student_json`) and publish the updated counts to an
   * upsert-kafka sink (`clazz_num`), keyed by class.
   */
  def main(args: Array[String]): Unit = {
    /**
     * 1. Create the Flink SQL (Table API) environment.
     */
    val settings: EnvironmentSettings = EnvironmentSettings
      .newInstance()
      .inStreamingMode() // streaming mode
      //.inBatchMode()   // batch mode
      .build()

    val tableEnv: TableEnvironment = TableEnvironment.create(settings)

    // Source table: student records read as JSON from Kafka.
    tableEnv.executeSql(
      """
        |CREATE TABLE kafka_source (
        |    id STRING,
        |    name STRING,
        |    age INT,
        |    gender STRING,
        |    clazz STRING
        |) WITH (
        |    'connector' = 'kafka',                    -- source connector type
        |    'topic' = 'student_json',                 -- topic to read
        |    'properties.bootstrap.servers' = 'master:9092,node1:9092,node2:9092',
        |    'properties.group.id' = 'testGroup',      -- consumer group
        |    'scan.startup.mode' = 'earliest-offset',  -- where to start reading
        |    'format' = 'json',                        -- csv = positional mapping, json = mapping by field name
        |    'json.ignore-parse-errors' = 'true'       -- skip records whose JSON cannot be parsed
        |)
        |""".stripMargin)

    // Sink table: upsert-kafka retains only the latest count per class,
    // using the declared primary key as the Kafka message key.
    tableEnv.executeSql(
      """
        |CREATE TABLE kafka_sink_on_upsert (
        |    clazz STRING,
        |    num BIGINT,
        |    PRIMARY KEY (clazz) NOT ENFORCED
        |) WITH (
        |  'connector' = 'upsert-kafka', -- upsert-kafka: updating (retract/upsert) kafka sink
        |  'topic' = 'clazz_num',
        |  'properties.bootstrap.servers' = 'master:9092,node1:9092,node2:9092',
        |  'key.format' = 'json',
        |  'value.format' = 'json'
        |)
        |""".stripMargin)

    // Continuous aggregation: count students per class and write the
    // result to the upsert sink. await() keeps the client attached to the
    // submitted job; with an unbounded source this blocks indefinitely,
    // which is the intended behavior for this standalone streaming demo.
    tableEnv
      .executeSql(
        """
          |-- count students per class and write the result to kafka
          |insert into kafka_sink_on_upsert
          |select
          |clazz, count(1) as num
          |from
          |kafka_source
          |group by clazz
          |""".stripMargin)
      .await()
  }

}
