package com.shujia.flink.table

import org.apache.flink.table.api.{EnvironmentSettings, TableEnvironment}

object Demo1WordCount {
  def main(args: Array[String]): Unit = {

    // Build the Flink SQL environment in streaming mode
    // (swap in .inBatchMode() for batch processing).
    val envSettings: EnvironmentSettings = EnvironmentSettings
      .newInstance()
      .inStreamingMode()
      .build()

    val tEnv: TableEnvironment = TableEnvironment.create(envSettings)

    // DDL for the source table: a dynamic table defined over a Kafka stream.
    // NOTE(review): reading from Kafka requires the Flink Kafka SQL connector
    // dependency on the classpath.
    val kafkaSourceDdl: String =
      """
        |CREATE TABLE words (
        |  word STRING
        |) WITH (
        |  'connector' = 'kafka', --- 链接方式为kafka
        |  'topic' = 'lines', -- topic
        |  'properties.bootstrap.servers' = 'master:9092', --- kafka集群列表
        |  'properties.group.id' = 'asdsadasd',-- 消费者组
        |  'scan.startup.mode' = 'latest-offset',--读取数据的位置
        |  'format' = 'csv',--数据的格式
        |  'csv.field-delimiter' ='\t' -- 字段分隔符
        |)
        |""".stripMargin

    // DDL for the sink table: the continuous query's result is written to a
    // 'print' table so it can be inspected on stdout.
    val printSinkDdl: String =
      """
        |CREATE TABLE print_table (
        | word STRING,
        | c bigint
        |) WITH (
        | 'connector' = 'print'
        |)
        |""".stripMargin

    // DML: the continuous query over the dynamic source table — a streaming
    // word count grouped by word, inserted into the print sink.
    val wordCountDml: String =
      """
        |insert into print_table
        |select word,count(1) as c
        |from
        |words
        |group by word
        |
        |""".stripMargin

    // Register source and sink, then submit the continuous query.
    tEnv.executeSql(kafkaSourceDdl)
    tEnv.executeSql(printSinkDdl)
    tEnv.executeSql(wordCountDml)
  }

}
