package com.shujia.flink.sql

import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala._

import org.apache.flink.table.api.{EnvironmentSettings, Table}
import org.apache.flink.table.api.bridge.scala.StreamTableEnvironment
import org.apache.flink.types.Row

object DEmo1SqlWordCount {

  /**
    * Streaming word count written in Flink SQL:
    * reads lines from a Kafka topic, aggregates counts per line,
    * writes the result to a MySQL table, and also prints the
    * aggregation as a retract stream for debugging.
    */
  def main(args: Array[String]): Unit = {

    // DataStream API entry point; also backs the table environment below.
    val executionEnv: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // Table environment settings: Blink planner in streaming mode.
    val settings: EnvironmentSettings = EnvironmentSettings
      .newInstance()
      .useBlinkPlanner() // use the Blink planner
      .inStreamingMode()
      .build()

    // Build the table environment on top of the streaming environment.
    val tableEnv: StreamTableEnvironment = StreamTableEnvironment.create(executionEnv, settings)

    // Create the source table (Kafka connector).
    // NOTE(review): table metadata is held in the JobManager's memory.
    tableEnv.executeSql(
      """
        |
        |CREATE TABLE kafkaTable (
        | line String
        |) WITH (
        | 'connector' = 'kafka',
        | 'topic' = 'test_topic1',
        | 'properties.bootstrap.servers' = 'master:9092,node1:9092,node2:9092',
        | 'properties.group.id' = 'testGroup1',
        | 'format' = 'csv',
        | 'csv.ignore-parse-errors' = 'true',
        | 'scan.startup.mode' = 'earliest-offset'
        |)
        |
      """.stripMargin)

    // Create the sink table (JDBC/MySQL connector).
    // The word_count table must already exist in MySQL, and the Flink-side
    // column names must match the MySQL column names exactly.
    tableEnv.executeSql(
      """
        |CREATE TABLE mysql_count (
        |  word STRING,
        |  c BIGINT,
        |  PRIMARY KEY (word) NOT ENFORCED
        |) WITH (
        |   'connector' = 'jdbc',
        |   'url' = 'jdbc:mysql://master:3306/student',
        |   'table-name' = 'word_count',
        |   'username' = 'root',
        |   'password'= '123456'
        |)
        |
      """.stripMargin)

    // Submit the SQL job that writes the aggregation into the sink table.
    // The projected column names and types must match the sink table schema.
    tableEnv.executeSql(
      """
        |insert into mysql_count
        |select line as word,count(1) as c from kafkaTable  group by line
        |
      """.stripMargin)

    // Same aggregation exposed as a Table, printed as a retract stream
    // so updates to each count are visible on stdout.
    val wordCounts: Table = tableEnv.sqlQuery(
      """
        |select line,count(1) as c from kafkaTable  group by line
        |
          """.stripMargin)

    wordCounts.toRetractStream[Row].print()

    // Required here because the retract-stream print above uses the
    // DataStream API; a pure Table/SQL pipeline would not need this call.
    executionEnv.execute()
  }

}
