package com.shujia.ads

import com.shujia.udf.StrUDF
import com.shujia.util.IKUtil
import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala._
import org.apache.flink.table.catalog.hive.HiveCatalog
import org.apache.flink.types.Row

/*
Read the comment text (`comment_text`) from `dwd_kafka_comment_msk`,
tokenize it with the IK analyzer, then group by word and count
occurrences (classic word-count), writing the result to MySQL.
 */
object Demo4ComputeWordCloundIndex {

  /**
   * Streaming word-cloud index job: reads cleaned comment text from the
   * DWD Kafka-backed table, tokenizes with the IK analyzer, counts words,
   * and continuously upserts the counts into the ADS MySQL table.
   */
  def main(args: Array[String]): Unit = {
    val bsEnv = StreamExecutionEnvironment.getExecutionEnvironment
    val bsSettings = EnvironmentSettings.newInstance().useBlinkPlanner().inStreamingMode().build()

    val bsTableEnv = StreamTableEnvironment.create(bsEnv, bsSettings)

    // Hive catalog: catalog name, default database, directory holding hive-site.xml.
    // NOTE(review): the relative resource path only resolves when the job is
    // launched from the project root — confirm for cluster deployments.
    val hiveCatalog = new HiveCatalog("myHive", "sent", "SentimentCompute/src/main/resources")

    bsTableEnv.registerCatalog("myHive", hiveCatalog)
    bsTableEnv.useCatalog("myHive")
    // Register the string-cleaning UDF referenced as strUDF(...) in the SQL below.
    bsTableEnv.createTemporarySystemFunction("strUDF", classOf[StrUDF])

    val commentTable = bsTableEnv.sqlQuery(
      """
        |select strUDF(comment_text) as comment_text from dwd.dwd_kafka_comment_msk
        |""".stripMargin)

    // Tokenize each comment and count word frequencies (word-count pattern).
    val commentDS = commentTable.toAppendStream[Row]
      // getField(0) is null when comment_text is SQL NULL; the original
      // .toString would throw a NullPointerException here. Guard and drop blanks.
      .map(row => Option(row.getField(0)).map(_.toString).getOrElse(""))
      .filter(_.nonEmpty)
      .flatMap(line => IKUtil.fit(line))
      .map((_, 1))
      .keyBy(_._1)
      .sum(1)

    // executeInsert() translates and submits the whole pipeline itself
    // (Flink 1.11+). A trailing bsEnv.execute() would attempt to submit a
    // second topology with no DataStream sinks and fail with
    // "No operators defined in streaming topology", so it is omitted.
    bsTableEnv.fromDataStream(commentDS).executeInsert("ads.ads_mysql_word_index")
  }
}
