package com.shujia.ads

import com.shujia.util.IkUtil
import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api.{EnvironmentSettings, Table}
import org.apache.flink.table.api.bridge.scala.StreamTableEnvironment
import org.apache.flink.table.api._
import org.apache.flink.table.catalog.hive.HiveCatalog
import org.apache.flink.types.Row

/**
 * Streaming job: computes a word-cloud index from comment text.
 *
 * Reads comment rows from the Hive-cataloged table `dwd.dwd_kafka_comment_msk`,
 * tokenizes each comment with [[IkUtil]], keeps a running word count, and
 * continuously upserts the (word, count) pairs into `ads.ads_mysql_word_index`.
 */
object Demo2ComputeWordCloudIndex {

  def main(args: Array[String]): Unit = {

    val bsEnv: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    val bsSettings: EnvironmentSettings = EnvironmentSettings
      .newInstance()
      .useBlinkPlanner() // use the Blink planner
      .inStreamingMode() // streaming execution mode
      .build()

    // Create the table environment bridging the DataStream and Table APIs.
    val bsTableEnv: StreamTableEnvironment = StreamTableEnvironment.create(bsEnv, bsSettings)

    // Register a Hive catalog so SQL can resolve dwd/ads tables.
    // Args: catalog name, default database, directory containing hive-site.xml.
    val hiveCatalog: HiveCatalog = new HiveCatalog("myHive", "sent", "sentcompute/src/main/resources")

    bsTableEnv.registerCatalog("myHive", hiveCatalog)

    bsTableEnv.useCatalog("myHive")

    // Pull only the raw comment text column from the source table.
    val sentCommentTable: Table = bsTableEnv.sqlQuery("select comment_text from dwd.dwd_kafka_comment_msk")

    val sentCommentDS: DataStream[Row] = bsTableEnv.toAppendStream[Row](sentCommentTable)

    // Tokenize each comment and maintain a running count per word.
    val countDS: DataStream[(String, Int)] = sentCommentDS
      .map(row => row.getField(0).toString)
      .flatMap(line => IkUtil.fit(line)) // word segmentation
      .map((_, 1))
      .keyBy(_._1)
      .sum(1)

    // executeInsert submits the job itself; a trailing bsEnv.execute() would
    // fail with "No operators defined in the streaming topology" because the
    // only sink in this pipeline belongs to the Table API.
    bsTableEnv
      .fromDataStream(countDS, $"word", $"c")
      .executeInsert("ads.ads_mysql_word_index")

  }

}
