package com.shujia.ads

import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala._
import org.apache.flink.table.catalog.hive.HiveCatalog

/*
 Group dwd_kafka_weibo_msk directly by `source` and count per group,
 continuously writing the result into the ads-layer MySQL source-index table.
 */
object Demo2ComputeSourceIndex {

  /**
   * Entry point: submits a continuous streaming SQL job that groups the
   * DWD Kafka weibo table by `source`, counts rows per group, and upserts
   * the result into `ads.ads_mysql_source_index`.
   */
  def main(args: Array[String]): Unit = {
    val bsEnv = StreamExecutionEnvironment.getExecutionEnvironment
    // Blink planner in streaming mode — required here for catalog-backed SQL DML.
    val bsSettings = EnvironmentSettings.newInstance().useBlinkPlanner().inStreamingMode().build()

    val bsTableEnv = StreamTableEnvironment.create(bsEnv, bsSettings)

    // HiveCatalog(catalogName, defaultDatabase, hiveConfDir) — the third argument
    // is the directory expected to contain hive-site.xml (relative to the launch
    // directory; NOTE(review): fragile outside the project root — confirm).
    val hiveCatalog = new HiveCatalog("myHive", "sent", "SentimentCompute/src/main/resources")

    bsTableEnv.registerCatalog("myHive", hiveCatalog)
    bsTableEnv.useCatalog("myHive")

    // executeSql on an INSERT statement submits the Flink job immediately
    // (asynchronously) and returns a TableResult handle.
    bsTableEnv.executeSql(
      """
        |insert into ads.ads_mysql_source_index
        |select
        |source
        |,count(*) as c
        |from dwd.dwd_kafka_weibo_msk
        |group by source
        |""".stripMargin)

    // NOTE: do NOT call bsEnv.execute() here. The pipeline is built entirely
    // through the Table API, so the DataStream topology is empty and execute()
    // would fail at runtime with:
    //   "No operators defined in streaming topology. Cannot execute."
    // The INSERT job above is already running after executeSql returns.
  }

}
