package com.hrt.flink.hudi

import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.table.api.bridge.scala.StreamTableEnvironment

/**
 * Example: reading data from a Hudi table with the Flink Table API.
 *
 * Registers a Hudi-backed table over HDFS via Flink SQL DDL, then runs a
 * streaming `SELECT` against it and prints the results to stdout.
 */
object FlinkReadDataFromHudi {
  def main(args: Array[String]): Unit = {
    // 1. Set up the streaming environment and its Table API bridge.
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
    val tableEnv: StreamTableEnvironment = StreamTableEnvironment.create(env)

    // 2. Register the Hudi table. Hudi's default record-key column is "uuid";
    //    "PRIMARY KEY NOT ENFORCED" designates `id` as the record key instead
    //    (see the inline SQL comment, kept from the original author).
    tableEnv.executeSql(
      """
        |CREATE TABLE t1(
        |  id VARCHAR(20) PRIMARY KEY NOT ENFORCED,--默认主键列为uuid,这里可以后面跟上“PRIMARY KEY NOT ENFORCED”指定为主键列
        |  name VARCHAR(10),
        |  age INT,
        |  ts VARCHAR(20),
        |  loc VARCHAR(20)
        |)
        |PARTITIONED BY (loc)
        |WITH (
        |  'connector' = 'hudi',
        |  'path' = 'hdfs://hadoop102:8020/flink_hudi_data',
        |  'write.tasks' = '1', -- default is 4 ,required more resource
        |  'compaction.tasks' = '1', -- default is 10 ,required more resource
        |  'table.type' = 'COPY_ON_WRITE' -- COPY_ON_WRITE is the default; set MERGE_ON_READ for an MOR table
        |)
      """.stripMargin)

    // 3. Run the query. executeSql() submits and executes the Flink job itself;
    //    print() pulls the (possibly unbounded) result rows to the console.
    tableEnv.executeSql(
      s"""
         | select name from t1
      """.stripMargin).print()

    // NOTE: no env.execute() here. Table API's executeSql() already submits the
    // job; invoking execute() on an environment with no DataStream operators
    // fails with "No operators defined in streaming topology".
  }
}
