package com.shujia.dwd

import org.apache.flink.streaming.api.scala._
import org.apache.flink.table.api.{EnvironmentSettings, _}
import org.apache.flink.table.api.bridge.scala.{StreamTableEnvironment, _}
import org.apache.flink.table.catalog.hive.HiveCatalog
import org.apache.flink.types.Row

/**
  * ODS -> DWD ETL job for weibo comments.
  *
  * Reads `ods.ods_kafka_comment` (registered in the Hive catalog), removes
  * duplicate comment rows, masks `user_id` with MD5, and writes the result
  * into `dwd.dwd_kafka_comment_msk`.
  */
object OdsCommentEtl {
  def main(args: Array[String]): Unit = {

    val bsEnv: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    val bsSettings: EnvironmentSettings = EnvironmentSettings
      .newInstance()
      .useBlinkPlanner() // use the Blink planner
      .inStreamingMode() // streaming mode
      .build()

    // Create the table environment on top of the streaming environment.
    val bsTableEnv: StreamTableEnvironment = StreamTableEnvironment.create(bsEnv, bsSettings)

    // Hive catalog: name, default database, and the directory containing hive-site.xml.
    val hiveCatalog: HiveCatalog = new HiveCatalog("myHive", "sent", "sentcompute/src/main/resources")

    bsTableEnv.registerCatalog("myHive", hiveCatalog)

    bsTableEnv.useCatalog("myHive")

    // Deduplicate comments and mask user_id with MD5 (cast needed: md5 expects STRING).
    val table: Table = bsTableEnv.sqlQuery(
      """
        |select distinct comment_id,comment_time,like_count,comment_text,
        |md5(cast(user_id as string)) as user_id ,weibo_id
        |from ods.ods_kafka_comment
        |
      """.stripMargin)

    /**
      * Convert the changelog (update) stream into an append-only stream.
      * toRetractStream emits (flag, row) pairs where flag == true is an
      * accumulate (insert) message and flag == false a retraction. Only the
      * accumulate messages are kept; re-appending retractions would create
      * duplicate rows in the sink.
      */
    val ds: DataStream[(Boolean, Row)] = table.toRetractStream[Row]

    val rowTable: Table = ds
      .filter(_._1) // keep only insert/accumulate messages
      .toTable(bsTableEnv, $"flag", $"r")
      .select($"r")

    bsTableEnv.createTemporaryView("tmp_table", rowTable)

    // executeSql on an INSERT statement submits the Flink job immediately
    // (Flink >= 1.11), so no extra bsEnv.execute() call is needed afterwards —
    // calling it would fail with "No operators defined in streaming topology"
    // or submit a stray duplicate job.
    bsTableEnv.executeSql(
      """
        |insert into dwd.dwd_kafka_comment_msk
        |select r.comment_id,r.comment_time,r.like_count,r.comment_text,r.user_id,r.weibo_id from tmp_table
        |
      """.stripMargin)
  }
}
