package com.csw.flink.sql

import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala._
import org.apache.flink.table.api.{EnvironmentSettings, Table}
import org.apache.flink.table.api.bridge.scala.StreamTableEnvironment
import org.apache.flink.types.Row

/**
  * Streaming word count expressed entirely in Flink SQL:
  * reads lines from a Kafka topic, counts occurrences per line,
  * and upserts the running counts into a MySQL table via JDBC.
  */
object Demo01WordCount {

  def main(args: Array[String]): Unit = {

    // Underlying DataStream execution environment.
    val streamEnv: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // Planner settings: Blink planner, streaming mode.
    val plannerSettings: EnvironmentSettings = EnvironmentSettings
      .newInstance()
      .useBlinkPlanner() // use the Blink planner
      .inStreamingMode()
      .build()

    // Build the Table environment on top of the stream environment.
    // Table metadata is kept in the JobManager's memory.
    val tableEnv: StreamTableEnvironment = StreamTableEnvironment.create(streamEnv, plannerSettings)

    // Source table DDL: one String column per Kafka record, CSV format.
    // 'csv.ignore-parse-errors' = 'true' skips malformed CSV records instead of failing.
    val kafkaSourceDdl: String =
      """
        |
        |CREATE TABLE kafkaTable (
        |line String
        |) WITH (
        | 'connector' = 'kafka',
        | 'topic' = 'test_topicl',
        | 'properties.bootstrap.servers' = 'master:9092,node1:9092,node2:9092',
        | 'properties.group.id' = 'csw',
        | 'format' = 'csv',
        | 'csv.ignore-parse-errors' = 'true',
        | 'scan.startup.mode' = 'earliest-offset'
        |)
        |
      """.stripMargin

    // Sink table DDL: JDBC upsert sink keyed on `line`
    // (PRIMARY KEY ... NOT ENFORCED enables upsert semantics).
    val mysqlSinkDdl: String =
      """
        |
        |CREATE TABLE mysql_count (
        |  line String,
        |  c bigint,
        |  PRIMARY KEY (line) NOT ENFORCED
        |) WITH (
        |   'connector' = 'jdbc',
        |   'url' = 'jdbc:mysql://master:3306/csw',
        |   'table-name' = 'word_count',
        |   'username' = 'root',
        |   'password' = '123456'
        |
        |)
        |
      """.stripMargin

    // Continuous query writing into the sink. The selected column names
    // and types must match the sink table's schema exactly.
    val wordCountInsert: String =
      """
        |
        |insert into mysql_count
        |select line,count(1) as c from kafkaTable group by line
        |
      """.stripMargin

    // Register source and sink, then submit the INSERT job.
    tableEnv.executeSql(kafkaSourceDdl)
    tableEnv.executeSql(mysqlSinkDdl)
    tableEnv.executeSql(wordCountInsert)

    // Alternative: run the query and print the retract stream directly:
    //   val countTable: Table = tableEnv.sqlQuery(
    //     "select line,count(1) as c from kafkaTable group by line")
    //   countTable.toRetractStream[Row].print()
    //
    // streamEnv.execute() is only required when the DataStream API is used;
    // executeSql submits the job by itself.
  }
}
