package com.shujia.flink.sql

import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.table.api.EnvironmentSettings
import org.apache.flink.table.api.bridge.scala.StreamTableEnvironment
import org.apache.flink.table.catalog.hive.HiveCatalog

object Demo12OnHIve {

  /**
    * Flink SQL on Hive demo: registers the Hive Metastore as a Flink catalog,
    * then runs a continuous INSERT that computes a word count per 5-second
    * tumbling window and keeps the top 2 words of each window (via
    * ROW_NUMBER), writing the result into the existing Hive table `topn`.
    *
    * Expects the tables `words` (source, with event-time attribute `ts`)
    * and `topn` (sink) to already exist in the Hive catalog.
    */
  def main(args: Array[String]): Unit = {

    val bsEnv: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // Planner settings: Blink planner, unbounded streaming execution.
    val bsSettings: EnvironmentSettings = EnvironmentSettings
      .newInstance()
      .useBlinkPlanner() // use the Blink planner
      .inStreamingMode() // streaming (unbounded) mode
      .build()

    // Create the Flink SQL execution environment.
    val bsTableEnv: StreamTableEnvironment = StreamTableEnvironment.create(bsEnv, bsSettings)

    val name = "myhive"
    val defaultDatabase = "default"
    val hiveConfDir = "/usr/local/soft/hive-1.2.1/conf" // local path holding hive-site.xml

    val hive = new HiveCatalog(name, defaultDatabase, hiveConfDir)
    // Register the Hive Metastore as a catalog so Hive tables are visible to Flink SQL.
    // Reuse the `name` val (was a repeated "myhive" literal) so register/use stay in sync.
    bsTableEnv.registerCatalog(name, hive)

    // Switch the current catalog to Hive.
    bsTableEnv.useCatalog(name)

    /**
      * Work directly against tables that already exist in Hive:
      * inner query counts words per 5s tumbling window, the middle query
      * ranks words within each window end-time, the outer filter keeps rank <= 2.
      */

    bsTableEnv.executeSql(
      """
        |insert into topn
        |
        |select * from(
        |select word,end_time,c,row_number() over(partition by end_time order by c desc) as r from
        |(
        |select
        |word,
        |TUMBLE_END(ts, INTERVAL '5' SECOND) end_time,
        |count(1) c
        |from
        |words
        |group by word,TUMBLE(ts, INTERVAL '5' SECOND)
        |) as a
        |) as b
        |where r<=2
        |
      """.stripMargin)


    /**
      * If a table already exists and its options need to be overridden at
      * query time, use a dynamic-table-options hint placed after the table name:
      * set table.dynamic-table-options.enabled=true;
      * select * from words  /*+ OPTIONS('scan.startup.mode'='earliest-offset') */;
      *
      */

  }

}
