package com.shujia.flink.sql

import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala._
import org.apache.flink.types.Row

object Demo1TableToStream {
  def main(args: Array[String]): Unit = {
    // Flink streaming execution environment.
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // Table/SQL configuration: Blink planner running in streaming mode.
    val settings: EnvironmentSettings = EnvironmentSettings
      .newInstance()
      .useBlinkPlanner() // the Blink planner parses and plans the SQL
      .inStreamingMode()
      .build()

    // Table environment bridging the DataStream API and the Table/SQL API.
    val tableEnv: StreamTableEnvironment = StreamTableEnvironment.create(env, settings)

    /**
      * executeSql can run DDL (table creation) as well as data-processing
      * statements.
      *
      * Here it registers the Kafka-backed source table `lines`
      * (a dynamic table).
      */
    tableEnv.executeSql(
      """
        |CREATE TABLE lines (
        | line String
        |) WITH (
        | 'connector' = 'kafka',
        | 'topic' = 'lines',
        | 'properties.bootstrap.servers' = 'master:9092',
        | 'properties.group.id' = 'asdsadasd',
        | 'format' = 'csv',
        | 'scan.startup.mode' = 'earliest-offset',
        | 'csv.ignore-parse-errors' = 'true'
        |)
      """.stripMargin)

    // Obtain a Table handle for the registered source table.
    val linesTable: Table = tableEnv.from("lines")

    /**
      * Only insert-only tables can be converted to an append stream, e.g.:
      *
      *   val ds: DataStream[Row] = linesTable.toAppendStream[Row]
      *   ds.print()
      *   env.execute()
      */

    /**
      * executeSql:
      *   1. creates tables (DDL)
      *   2. runs INSERT INTO statements
      *
      * sqlQuery:
      *   runs a SELECT statement and yields a Table.
      */
    val wordCounts: Table = tableEnv.sqlQuery(
      """
         select line,count(1) from lines group by line

      """.stripMargin)

    /**
      * The aggregation emits updates, so convert the result to a retract
      * stream: each element is (isAccumulate, row).
      */
    val retractStream: DataStream[(Boolean, Row)] = wordCounts.toRetractStream[Row]

    retractStream.print()

    env.execute()
  }
}
