package com.shujia.flink.sql

import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala._
import org.apache.flink.types.Row

/**
  * Streaming word count expressed in Flink SQL.
  *
  * Reads words from the Kafka topic `words` (CSV format, earliest offset),
  * counts occurrences per word with a continuous GROUP BY query, and prints
  * the retract stream (add/delete flags plus rows) to stdout.
  */
object Demo1WordCount {

  def main(args: Array[String]): Unit = {
    // Streaming execution environment for the underlying DataStream job.
    val env = StreamExecutionEnvironment.getExecutionEnvironment

    // Blink planner in streaming mode — required for the SQL DDL below.
    val settings = EnvironmentSettings
      .newInstance()
      .useBlinkPlanner()
      .inStreamingMode()
      .build()

    // Table environment bridging the DataStream API and Flink SQL.
    val tableEnv = StreamTableEnvironment.create(env, settings)

    // Register the Kafka-backed source table; executeSql handles DDL directly.
    // Parse errors in the CSV payload are skipped rather than failing the job.
    tableEnv.executeSql(
      """
        |CREATE TABLE words (
        | word STRING
        |) WITH (
        | 'connector' = 'kafka',
        | 'topic' = 'words',
        | 'properties.bootstrap.servers' = 'master:9092',
        | 'properties.group.id' = 'asdsa',
        | 'format' = 'csv',
        | 'scan.startup.mode' = 'earliest-offset',
        | 'csv.ignore-parse-errors'='true'
        |)
        |
      """.stripMargin)

    // Continuous aggregation: per-word running count over the unbounded stream.
    val counts: Table = tableEnv.sqlQuery(
      """
        |select word,count(1) from words group by word
        |
      """.stripMargin)

    // A grouped aggregate produces updates, so convert to a retract stream:
    // (true, row) = insert/upsert, (false, row) = retraction of a prior result.
    val retracts: DataStream[(Boolean, Row)] = counts.toRetractStream[Row]
    retracts.print()

    // Submit the job; nothing runs until execute() is called.
    env.execute()
  }
}
