package com.shujia.flink.sql

import org.apache.flink.table.api.{EnvironmentSettings, TableEnvironment}

object Demo7kafkaSInkUpdate {
  /**
   * Streaming job: reads CSV student records from the Kafka topic
   * `student`, continuously counts students per gender, and upserts the
   * running counts (keyed by gender) into the Kafka topic `gender_num`
   * via the upsert-kafka connector.
   */
  def main(args: Array[String]): Unit = {
    // Streaming mode: the aggregate emits an updating (changelog) result,
    // which is why the sink must be upsert-kafka with a primary key.
    val settings: EnvironmentSettings = EnvironmentSettings
      .newInstance()
      .inStreamingMode()
      //.inBatchMode()
      .build()

    // Flink SQL environment
    val table: TableEnvironment = TableEnvironment.create(settings)

    // Source table over the `student` Kafka topic (CSV-encoded rows).
    table.executeSql(
      """
        |CREATE TABLE student_kafka (
        |    id STRING,
        |    name STRING,
        |    age INT,
        |    gender STRING,
        |    clazz STRING
        |) WITH (
        |  'connector' = 'kafka',
        |  'topic' = 'student',
        |  'properties.bootstrap.servers' = 'master:9092,node1:9092,node2:9092',
        |  'properties.group.id' = 'testGroup',
        |  'scan.startup.mode' = 'earliest-offset',
        |  'format' = 'csv',
        |  'csv.field-delimiter'=',', -- csv格式数据的分隔符
        |  'csv.ignore-parse-errors'='true', -- 如果出现脏数据据,补null
        |  'csv.allow-comments'='true'--跳过#注释行
        |)
        |""".stripMargin)

    // Upsert sink: the PRIMARY KEY tells Flink which field keys the
    // changelog records written to Kafka.
    table.executeSql(
      """
        |CREATE TABLE gender_num_sink (
        |    gender STRING,
        |	num BIGINT,
        |    PRIMARY KEY (gender) NOT ENFORCED-- 设置唯一主键
        |) WITH (
        |  'connector' = 'upsert-kafka',
        |  'topic' = 'gender_num',
        |  'properties.bootstrap.servers' = 'master:9092,node1:9092,node2:9092',
        |  'key.format' = 'csv',
        |  'value.format' = 'csv'
        |)
        |""".stripMargin)

    // executeSql() on an INSERT only *submits* the job and returns
    // immediately; without await() the JVM would exit and (in local /
    // mini-cluster execution) tear down the streaming job before it
    // processes anything. await() blocks for the lifetime of the job.
    table.executeSql(
      """
        |insert into gender_num_sink
        |select gender,count(1) as num
        |from student_kafka
        |where gender is not null
        |group by gender
        |
        |""".stripMargin)
      .await()
  }

}
