package com.shujia.flink.sql

import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala._
import org.apache.flink.table.functions.ScalarFunction
import org.apache.flink.types.Row

/**
  * Streaming word-count demo written entirely in Flink SQL:
  * reads words from a Kafka topic, aggregates counts per word,
  * and upserts the running totals into a MySQL table.
  */
object Demo1WordCount {

  def main(args: Array[String]): Unit = {
    // Underlying DataStream execution environment.
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // Table planner configuration: Blink planner, unbounded streaming mode.
    val settings: EnvironmentSettings = EnvironmentSettings
      .newInstance()
      .useBlinkPlanner() // use the Blink planner
      .inStreamingMode() // run in streaming mode
      .build()

    // Flink SQL execution environment bridging the stream env and the Table API.
    val tableEnv: StreamTableEnvironment = StreamTableEnvironment.create(env, settings)

    // 1. Define the source table (a dynamic table) backed by a Kafka topic.
    //    Each CSV record carries a single `word` column; parse errors are skipped
    //    and consumption starts from the earliest offset.
    tableEnv.executeSql(
      """
        |
        |CREATE TABLE lines (
        | word STRING
        |) WITH (
        | 'connector' = 'kafka',
        | 'topic' = 'lines',
        | 'properties.bootstrap.servers' = 'master:9092,node1:9092,node2:9092',
        | 'properties.group.id' = 'asdasdasd',
        | 'format' = 'csv',
        | 'csv.ignore-parse-errors' = 'true',
        | 'scan.startup.mode' = 'earliest-offset'
        |)
        |
        |
      """.stripMargin)

    // 2. Define the sink table.
    //    The `wc` table must be created in MySQL beforehand; the PRIMARY KEY
    //    declaration makes the JDBC sink operate in upsert mode.
    tableEnv.executeSql(
      """
        |CREATE TABLE wc (
        |  word STRING,
        |  c BIGINT,
        |  PRIMARY KEY (word) NOT ENFORCED
        |) WITH (
        |   'connector' = 'jdbc',
        |   'url' = 'jdbc:mysql://master:3306/bigdata',
        |   'table-name' = 'wc',
        |   'username' = 'root',
        |   'password' = '123456'
        |)
        |
        |
        """.stripMargin)

    // 3. Count occurrences per word and continuously write the results to the sink.
    //    executeSql submits the INSERT job itself, so no env.execute() call is needed.
    tableEnv.executeSql(
      """
        |insert into wc
        |select word,count(1) as c from lines group by word
        |
      """.stripMargin)

    // Debug alternative: convert the result to a retract stream and print it.
    //    val ds: DataStream[(Boolean, Row)] = table.toRetractStream[Row]
    //
    //    ds.print()
    // env.execute()

  }

}

