package com.shujia.compute

import com.shujia.util.IkUtil
import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api.{EnvironmentSettings, Table}
import org.apache.flink.table.api.bridge.scala.StreamTableEnvironment
import org.apache.flink.table.api._
import org.apache.flink.table.catalog.hive.HiveCatalog
import org.apache.flink.types.Row

/**
  * Streaming word-cloud job: counts the frequency of every word appearing in
  * the DWD-layer comment table and continuously upserts the (word, count)
  * result into the ADS-layer MySQL table `ads.ads_mysql_word_index`.
  */
object Demo2ComputeWordCloudIndex {

  def main(args: Array[String]): Unit = {

    val bsEnv: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    val bsSettings: EnvironmentSettings = EnvironmentSettings
      .newInstance()
      .useBlinkPlanner() // use the Blink planner
      .inStreamingMode() // streaming (unbounded) execution mode
      .build()

    // Create the table environment on top of the stream environment so we can
    // move between Table API and DataStream API.
    val bsTableEnv: StreamTableEnvironment = StreamTableEnvironment.create(bsEnv, bsSettings)

    // Register and activate the Hive catalog so the dwd/ads databases resolve.
    // NOTE(review): the Hive conf dir is a relative path — this assumes the job
    // is launched from the project root; confirm for cluster deployments.
    val hiveCatalog: HiveCatalog = new HiveCatalog("myHive", "sent", "sentcompute/src/main/resources")
    bsTableEnv.registerCatalog("myHive", hiveCatalog)
    bsTableEnv.useCatalog("myHive")

    // Query the DWD comment table, projecting only the comment-text column.
    val sentCommentTable: Table = bsTableEnv.sqlQuery(
      """
        |select comment_text from dwd.dwd_kafka_comment_msk
        |
      """.stripMargin)

    // Convert the append-only table into a DataStream of rows.
    val sentCommentDS: DataStream[Row] = bsTableEnv.toAppendStream[Row](sentCommentTable)

    // Tokenize each comment and maintain a running (word, count) aggregation.
    val countDS: DataStream[(String, Int)] = sentCommentDS
      .map(row => row.getField(0).toString) // the single projected column as String
      .flatMap(line => IkUtil.fit(line))    // IK segmentation: one comment -> many words
      .map((_, 1))
      .keyBy(_._1)
      .sum(1)

    // Turn the stream back into a table and insert it into the MySQL sink table.
    // executeInsert() compiles and submits the Flink job by itself, so a trailing
    // bsEnv.execute() must NOT be called: it would submit a second, sink-less copy
    // of the source pipeline (or fail with "no operators defined in streaming
    // topology", depending on the Flink version).
    bsTableEnv
      .fromDataStream(countDS, $"word", $"c")
      .executeInsert("ads.ads_mysql_word_index")
  }

}
