package com.shujia.stream.card

import org.apache.flink.table.api.{EnvironmentSettings, TableConfig, TableEnvironment}

import scala.io.{BufferedSource, Source}
import scala.util.Using

object FlinkSQLRun {

  /**
   * Entry point: reads a SQL script file (path given as the first program
   * argument), splits it on ';' and executes each non-blank statement
   * against a streaming-mode Flink [[TableEnvironment]].
   */
  def main(args: Array[String]): Unit = {

    // Fail fast with a usage message instead of a bare NoSuchElementException
    // when no argument is supplied.
    require(args.nonEmpty, "Usage: FlinkSQLRun <sql-file-path>")
    val sqlFilePath: String = args.head

    // Environment settings object — streaming processing mode.
    val settings: EnvironmentSettings = EnvironmentSettings
      .newInstance()
      .inStreamingMode() // streaming mode
      //.inBatchMode()   // batch mode
      .build()

    // Flink SQL execution environment.
    val tEnv: TableEnvironment = TableEnvironment.create(settings)

    val config: TableConfig = tEnv.getConfig

    // Optimization parameters: enable mini-batch aggregation and
    // two-phase aggregation.
    config.set("table.exec.mini-batch.enabled", "true")
    config.set("table.exec.mini-batch.allow-latency", "5s")
    config.set("table.exec.mini-batch.size", "5000")
    config.set("table.optimizer.agg-phase-strategy", "TWO_PHASE")

    // Read the whole SQL script. Using.resource guarantees the file handle
    // is closed even if reading fails (the original leaked the BufferedSource).
    val script: String = Using.resource(Source.fromFile(sqlFilePath))(_.mkString)

    // Execute each statement in order. Trim and drop blank fragments —
    // split(";") yields an empty trailing piece after a final ';' or
    // trailing newline, which executeSql would reject.
    script
      .split(";")
      .iterator
      .map(_.trim)
      .filter(_.nonEmpty)
      .foreach(tEnv.executeSql)
  }
}
