package com.shujia.sql

import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.table.api.EnvironmentSettings
import org.apache.flink.table.api.bridge.scala.StreamTableEnvironment

/**
  * Demo: stream a JSON file through Flink SQL and publish an aggregated,
  * continuously-updating result to Kafka.
  *
  * Pipeline: filesystem (JSON) source table -> GROUP BY aggregation ->
  * Kafka sink table in `changelog-json` format.
  *
  * Note: plain `json`/`csv` formats cannot encode retraction/update rows,
  * so the Kafka sink must use `changelog-json` to carry the updating
  * stream produced by the aggregation.
  */
object Demo5FlinkToKafka {

  def main(args: Array[String]): Unit = {

    // Streaming runtime environment for this job.
    val streamEnv: StreamExecutionEnvironment =
      StreamExecutionEnvironment.getExecutionEnvironment

    // Table environment configured for the Blink planner in streaming mode.
    val settings: EnvironmentSettings =
      EnvironmentSettings
        .newInstance()
        .useBlinkPlanner() // Blink planner
        .inStreamingMode() // streaming (not batch) semantics
        .build()

    val tableEnv: StreamTableEnvironment =
      StreamTableEnvironment.create(streamEnv, settings)

    // Source: students read from a local JSON file via the filesystem connector.
    tableEnv.executeSql(
      """
        |
        |CREATE TABLE student_filesystem (
        |  id STRING,
        |  name STRING,
        |  age BIGINT,
        |  gender STRING,
        |  clazz STRING
        |)  WITH (
        |  'connector' = 'filesystem',
        |  'path' = 'data/students.json',
        |  'format' = 'json'
        |)
        |
      """.stripMargin)

    // Sink: Kafka topic receiving the updating aggregate as changelog-json.
    // (Ordinary json/csv formats reject update/retract rows; changelog-json
    // accepts them.)
    tableEnv.executeSql(
      """
        |
        |CREATE TABLE avg_age (
        | clazz STRING,
        | avgAge DOUBLE
        |) WITH (
        | 'connector' = 'kafka',
        | 'topic' = 'avg_age',
        | 'properties.bootstrap.servers' = 'master:9092',
        | 'format' = 'changelog-json'
        |)
        |
      """.stripMargin)

    // Continuous query: average age per class, written into the Kafka sink.
    // executeSql submits the INSERT job; no explicit env.execute() is needed.
    tableEnv.executeSql(
      """
        |insert into avg_age
        |select clazz,avg(age) as avgAge from student_filesystem group by clazz
        |
      """.stripMargin)
  }

}
